操作系统和内核版本决定事件驱动机制,不同的操作系统对应不同的事件驱动机制,在Linux 2.6之后使用epoll机制,对应的事件驱动模块是ngx_epoll_module。Nginx的ngx_event_core_module模块根据操作系统确定使用哪一个事件驱动模块。
根据宏定义是否有值,确定使用哪种事件驱动模块。例如在编译后生成的文件NGINX/NGINX-1.6.2/objs/ngx_auto_config.h中查找对应的宏定义:若NGX_HAVE_EPOLL的值为1,则使用epoll驱动。对应代码:
/*
 * Module context (ctx) of the event core module.  It only provides the
 * create/init configuration callbacks; all ngx_event_actions_t function
 * pointers are NULL because the core module dispatches to a concrete
 * event module (epoll, kqueue, ...) rather than driving events itself.
 */
ngx_event_module_t ngx_event_core_module_ctx = {
&event_core_name,
ngx_event_core_create_conf, /* create configuration */
ngx_event_core_init_conf, /* init configuration */
{ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};
/*
 * Module definition of the event core module.  Its type is
 * NGX_EVENT_MODULE, and it hooks the master's module-init phase
 * (ngx_event_module_init) and each worker's process-init phase
 * (ngx_event_process_init); the remaining lifecycle hooks are unused.
 */
ngx_module_t ngx_event_core_module = {
NGX_MODULE_V1,
&ngx_event_core_module_ctx, /* module context */
ngx_event_core_commands, /* module directives */
NGX_EVENT_MODULE, /* module type */
NULL, /* init master */
ngx_event_module_init, /* init module */
ngx_event_process_init, /* init process */
NULL, /* init thread */
NULL, /* exit thread */
NULL, /* exit process */
NULL, /* exit master */
NGX_MODULE_V1_PADDING
};
/*
 * Default-initialize the event core configuration and choose the event
 * driver module.  Preference order: epoll (probed at run time via
 * epoll_create()), rtsig, /dev/poll, kqueue, select; if none of those
 * is compiled in, fall back to the first non-core NGX_EVENT_MODULE in
 * ngx_modules[].
 *
 * NOTE(review): the original excerpt had the NGX_HAVE_* preprocessor
 * guards stripped, so `module` was unconditionally overwritten with the
 * devpoll and kqueue modules (making the epoll/rtsig probe dead code),
 * and a final `return NGX_CONF_OK;` was unreachable after
 * `return NGX_CONF_ERROR;`.  The guards below restore the upstream
 * nginx behavior.
 *
 * Returns NGX_CONF_OK on success, NGX_CONF_ERROR when no event module
 * is available or the rtsig method is used without accept_mutex.
 */
static char *
ngx_event_core_init_conf(ngx_cycle_t *cycle, void *conf)
{
    ngx_event_conf_t    *ecf = conf;
#if (NGX_HAVE_EPOLL) && !(NGX_TEST_BUILD_EPOLL)
    int                  fd;
#endif
#if (NGX_HAVE_RTSIG)
    ngx_uint_t           rtsig;
    ngx_core_conf_t     *ccf;
#endif
    ngx_int_t            i;
    ngx_module_t        *module;
    ngx_event_module_t  *event_module;

    module = NULL;

#if (NGX_HAVE_EPOLL) && !(NGX_TEST_BUILD_EPOLL)

    /*
     * A successful epoll_create() proves kernel support.  Any failure
     * other than ENOSYS also means the syscall exists (e.g. EMFILE),
     * so epoll is still usable.
     */
    fd = epoll_create(100);

    if (fd != -1) {
        (void) close(fd);
        module = &ngx_epoll_module;

    } else if (ngx_errno != NGX_ENOSYS) {
        module = &ngx_epoll_module;
    }

#endif

#if (NGX_HAVE_RTSIG)
    if (module == NULL) {
        module = &ngx_rtsig_module;
        rtsig = 1;

    } else {
        rtsig = 0;
    }
#endif

#if (NGX_HAVE_DEVPOLL)
    module = &ngx_devpoll_module;
#endif

#if (NGX_HAVE_KQUEUE)
    module = &ngx_kqueue_module;
#endif

#if (NGX_HAVE_SELECT)
    if (module == NULL) {
        module = &ngx_select_module;
    }
#endif

    if (module == NULL) {

        /* fall back to the first compiled-in event module that is not
         * the event core module itself */

        for (i = 0; ngx_modules[i]; i++) {

            if (ngx_modules[i]->type != NGX_EVENT_MODULE) {
                continue;
            }

            event_module = ngx_modules[i]->ctx;

            if (ngx_strcmp(event_module->name->data, event_core_name.data) == 0)
            {
                continue;
            }

            module = ngx_modules[i];
            break;
        }
    }

    if (module == NULL) {
        ngx_log_error(NGX_LOG_EMERG, cycle->log, 0, "no events module found");
        return NGX_CONF_ERROR;
    }

    ngx_conf_init_uint_value(ecf->connections, DEFAULT_CONNECTIONS);
    cycle->connection_n = ecf->connections;

    /* record the chosen event module and its name as the defaults */
    ngx_conf_init_uint_value(ecf->use, module->ctx_index);

    event_module = module->ctx;
    ngx_conf_init_ptr_value(ecf->name, event_module->name->data);

    ngx_conf_init_value(ecf->multi_accept, 0);
    ngx_conf_init_value(ecf->accept_mutex, 0);
    ngx_conf_init_msec_value(ecf->accept_mutex_delay, 500);

#if (NGX_HAVE_RTSIG)

    if (!rtsig) {
        return NGX_CONF_OK;
    }

    if (ecf->accept_mutex) {
        return NGX_CONF_OK;
    }

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    /* a single worker process may use rtsig without accept_mutex */
    if (ccf->worker_processes == 0) {
        return NGX_CONF_OK;
    }

    ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                  "the \"rtsig\" method requires \"accept_mutex\" to be on");

    return NGX_CONF_ERROR;

#else

    return NGX_CONF_OK;

#endif
}
事件驱动模块在ngx_module_t中的ctx成员所指向的通用接口是ngx_event_module_t结构体,其定义如下所示:
/*
 * The common interface (ctx) implemented by every NGX_EVENT_MODULE
 * module: a name, optional configuration callbacks, and the table of
 * event-driver operations (add/del/process_events, ...).
 */
typedef struct {
ngx_str_t *name; /* name of the event module, e.g. "epoll" */
void *(*create_conf)(ngx_cycle_t *cycle); /* allocate the module's config struct */
char *(*init_conf)(ngx_cycle_t *cycle, void *conf); /* fill in config defaults */
ngx_event_actions_t actions; /* the driver's event operations */
} ngx_event_module_t;
/*
 * The operations an event driver module must provide.  The event core
 * copies the chosen module's actions into the global ngx_event_actions,
 * through which the rest of nginx adds/removes events and waits for
 * them.
 */
typedef struct {
ngx_int_t (*add)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags); /* watch an event */
ngx_int_t (*del)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags); /* stop watching an event */
ngx_int_t (*enable)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags); /* re-enable a disabled event */
ngx_int_t (*disable)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags); /* temporarily disable an event */
ngx_int_t (*add_conn)(ngx_connection_t *c); /* watch a whole connection */
ngx_int_t (*del_conn)(ngx_connection_t *c, ngx_uint_t flags); /* stop watching a connection */
ngx_int_t (*process_changes)(ngx_cycle_t *cycle, ngx_uint_t nowait); /* flush queued changes (kqueue) */
ngx_int_t (*process_events)(ngx_cycle_t *cycle, ngx_msec_t timer,
ngx_uint_t flags); /* wait for and dispatch events */
ngx_int_t (*init)(ngx_cycle_t *cycle, ngx_msec_t timer); /* initialize the driver */
void (*done)(ngx_cycle_t *cycle); /* shut the driver down */
} ngx_event_actions_t;
事件由ngx_event_t结构体定义:
/*
 * An event object.  One ngx_event_t describes a single read or write
 * event (or a timer) and carries the handler invoked when it fires.
 *
 * NOTE(review): the original excerpt had the preprocessor guards
 * stripped, leaving duplicate members (`posted_ready`, `available`,
 * `posted_available` each declared twice) and a missing trailing
 * semicolon, so the struct did not compile.  The guards below follow
 * the upstream nginx layout; confirm the exact macros against the
 * build's ngx_auto_config.h.
 */
struct ngx_event_s {
    void            *data;      /* opaque context, typically the ngx_connection_t */

    unsigned         write:1;   /* this is a write event */
    unsigned         accept:1;  /* event belongs to a listening socket */

    /* used to detect the stale events in kqueue, rtsig, and epoll */
    unsigned         instance:1;

    /*
     * the event was passed or would be passed to a kernel;
     * in aio mode - operation was posted.
     */
    unsigned         active:1;

    unsigned         disabled:1;

    /* the ready event; in aio mode 0 means that no operation can be posted */
    unsigned         ready:1;

    unsigned         oneshot:1;

    /* aio operation is complete */
    unsigned         complete:1;

    unsigned         eof:1;
    unsigned         error:1;

    unsigned         timedout:1;
    unsigned         timer_set:1;

    unsigned         delayed:1;

    unsigned         deferred_accept:1;

    /* the pending eof reported by kqueue, epoll or in aio chain operation */
    unsigned         pending_eof:1;

#if !(NGX_THREADS)
    unsigned         posted_ready:1;
#endif

#if (NGX_WIN32)
    /* setsockopt(SO_UPDATE_ACCEPT_CONTEXT) was successful */
    unsigned         accept_context_updated:1;
#endif

#if (NGX_HAVE_KQUEUE)
    unsigned         kq_vnode:1;

    /* the pending errno reported by kqueue */
    int              kq_errno;
#endif

    /*
     * kqueue only:
     *   accept:  number of sockets that wait to be accepted
     *   read:    bytes to read when event is ready
     *            or lowat when event is set with NGX_LOWAT_EVENT flag
     *   write:   available space in buffer when event is ready
     *            or lowat when event is set with NGX_LOWAT_EVENT flag
     *
     * iocp: TODO
     *
     * otherwise:
     *   accept:  1 if accept many, 0 otherwise
     */

#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_IOCP)
    int              available;
#else
    unsigned         available:1;
#endif

    ngx_event_handler_pt  handler;  /* callback run when the event fires */

#if (NGX_HAVE_IOCP)
    ngx_event_ovlp_t ovlp;
#else
    /* NOTE(review): guard reconstructed — confirm against the original
     * build macros for the POSIX aio variant */
    struct aiocb     aiocb;
#endif

    ngx_uint_t       index;

    ngx_log_t       *log;

    ngx_rbtree_node_t   timer;  /* node in the event timer red-black tree */

    unsigned         closed:1;

    /* to test on worker exit */
    unsigned         channel:1;
    unsigned         resolver:1;

#if (NGX_THREADS)

    unsigned         locked:1;

    unsigned         posted_ready:1;
    unsigned         posted_timedout:1;
    unsigned         posted_eof:1;

    /* the pending errno reported by kqueue */
    int              posted_errno;

#if (NGX_HAVE_KQUEUE)
    int              posted_available;
#else
    unsigned         posted_available:1;
#endif

    ngx_atomic_t    *lock;
    ngx_atomic_t    *own_lock;

#endif

    /* the links of the posted queue */
    ngx_event_t     *next;
    ngx_event_t    **prev;

    /* the threads support */

    /*
     * the event thread context, we store it here
     * if $(CC) does not understand __thread declaration
     * and pthread_getspecific() is too costly
     */
    void            *thr_ctx;

#if (NGX_EVENT_T_PADDING)
    /* event should not cross cache line in SMP */
    uint32_t         padding[NGX_EVENT_T_PADDING];
#endif
};
在ngx_event_t中,我们最关心handler成员,它决定了它所属的事件发生时的处理方法。