文章目录

  • ​​1.epoll​​
  • ​​2.epoll LT​​
  • ​​3.epoll ET​​

1.epoll

  • epoll
#include <sys/epoll.h>
int epoll_create(int size);
int epoll_create1(int flags);

int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event);
int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout);

epoll_data 是一个联合体,同一时刻只使用其中一个成员;其大小等于最大成员 uint64_t 的大小,即 8 个字节
/*
 * epoll_data: per-fd user data stored with each watched descriptor.
 * Only one member is meaningful at a time; the union's size is that of
 * its largest member (uint64_t), i.e. 8 bytes.
 *
 * FIX: the original snippet had bare annotation text after `int fd;` and
 * after the `data` member's comment, which would not compile; the notes
 * are kept but moved into proper comments.
 */
typedef union epoll_data {
    void *ptr;     /* arbitrary user pointer */
    int fd;        /* 4 bytes: the descriptor this event refers to */
    uint32_t u32;
    uint64_t u64;
} epoll_data_t;

struct epoll_event {
    uint32_t events;   /* Epoll events (EPOLLIN, EPOLLOUT, ...) */
    epoll_data_t data; /* User data variable: records which fd is watched */
};
  • 两种触发模式
    (1)Level-Triggered,与poll类似
    (2)Edge-Triggered,边沿触发
  • 如果采用Level-Triggered,那什么时候关注EPOLLOUT事件?会不会造成busy-loop?
    参见 poll 一节((P4)poll:poll使用的基本流程、EMFILE处理、cmake):epoll 的 LT 模式与 poll 的处理方式相同,不会造成 busy-loop

2.epoll LT

  • epoll LT图

    -eg:05\echosrv_epoll.cpp
#include <unistd.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/epoll.h>

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include <vector>
#include <algorithm>
#include <iostream>

// Growable buffer of epoll_event structures, handed to epoll_wait() as
// its output array.
typedef std::vector<struct epoll_event> EventList;

// Report the failing call via perror() and terminate the process.
// The do { } while(0) wrapper lets the macro be used as one statement.
#define ERR_EXIT(msg)       \
do                          \
{                           \
    perror(msg);            \
    exit(EXIT_FAILURE);     \
} while(0)

int main(void)
{
    // SIGPIPE would otherwise kill the server when it writes to a peer
    // that already closed; SIGCHLD is ignored because we never fork here.
    signal(SIGPIPE, SIG_IGN);
    signal(SIGCHLD, SIG_IGN);

    // Keep one spare fd so that, on EMFILE, we can momentarily free a
    // descriptor, accept the pending connection, and close it cleanly
    // instead of leaving it in the listen queue (which would busy-loop
    // under level-triggered mode).
    int idlefd = open("/dev/null", O_RDONLY | O_CLOEXEC);

    int listenfd;
    if ((listenfd = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC,
                           IPPROTO_TCP)) < 0)
        ERR_EXIT("socket");

    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(5188);
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);

    int on = 1;
    if (setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0)
        ERR_EXIT("setsockopt");

    if (bind(listenfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0)
        ERR_EXIT("bind");
    if (listen(listenfd, SOMAXCONN) < 0)
        ERR_EXIT("listen");

    std::vector<int> clients;
    // epoll_create1(EPOLL_CLOEXEC) == epoll_create + fcntl(FD_CLOEXEC) in
    // one call; the old "size" hint is ignored by modern kernels anyway.
    int epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (epollfd == -1)          // FIX: the return value was never checked
        ERR_EXIT("epoll_create1");

    struct epoll_event event;
    event.data.fd = listenfd;
    event.events = EPOLLIN;     // level-triggered (LT) is the default mode
    // Register listenfd and its interesting events with the kernel's
    // interest list ONCE; unlike poll(), this is not re-sent on every wait.
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, listenfd, &event) == -1)
        ERR_EXIT("epoll_ctl");

    EventList events(16);       // output buffer for ready events, grown on demand
    struct sockaddr_in peeraddr;
    socklen_t peerlen;
    int connfd;

    int nready;
    while (1)
    {
        // &*events.begin() is purely an OUTPUT array: epoll_wait() fills
        // it with the ready events.  The interest list already lives in
        // the kernel (via epoll_ctl above), so nothing is copied from
        // user space here -- poll(), by contrast, copies its whole pollfd
        // array into the kernel on every single call.
        // timeout of -1 means block until at least one event is ready.
        nready = epoll_wait(epollfd, &*events.begin(),
                            static_cast<int>(events.size()), -1);
        if (nready == -1)
        {
            if (errno == EINTR)
                continue;       // interrupted by a signal: not an error

            ERR_EXIT("epoll_wait");
        }
        if (nready == 0)        // nothing happened
            continue;

        // Every slot was used, so more events may still be pending:
        // double the buffer so the next epoll_wait() can report them all.
        if ((size_t)nready == events.size())
            events.resize(events.size() * 2);

        for (int i = 0; i < nready; ++i)
        {
            // Only ready (active) fds appear in events[0..nready).
            if (events[i].data.fd == listenfd)
            {
                peerlen = sizeof(peeraddr);
                // accept4 = accept + O_NONBLOCK/FD_CLOEXEC in one syscall.
                connfd = ::accept4(listenfd, (struct sockaddr*)&peeraddr,
                                   &peerlen, SOCK_NONBLOCK | SOCK_CLOEXEC);

                if (connfd == -1)
                {
                    if (errno == EMFILE)
                    {
                        // Out of descriptors: release the spare, drain one
                        // pending connection, close it, re-arm the spare.
                        close(idlefd);
                        idlefd = accept(listenfd, NULL, NULL);
                        close(idlefd);
                        idlefd = open("/dev/null", O_RDONLY | O_CLOEXEC);
                        continue;
                    }
                    else
                        ERR_EXIT("accept4");
                }

                std::cout<<"ip="<<inet_ntoa(peeraddr.sin_addr)<<
                    " port="<<ntohs(peeraddr.sin_port)<<std::endl;

                clients.push_back(connfd);

                // Add the new connection to the interest list as well.
                event.data.fd = connfd;
                event.events = EPOLLIN;     // LT mode here too
                epoll_ctl(epollfd, EPOLL_CTL_ADD, connfd, &event);
            }
            // listenfd handled; everything else is a client connection.
            else if (events[i].events & EPOLLIN)
            {
                connfd = events[i].data.fd;
                if (connfd < 0)
                    continue;

                char buf[1024] = {0};
                int ret = read(connfd, buf, 1024);

                // FIX: the original called ERR_EXIT on ANY read error, so
                // a single client's reset killed the whole server.  EINTR
                // and EAGAIN/EWOULDBLOCK are benign on this non-blocking
                // fd (LT mode will report it again); any other error just
                // drops this one connection.
                if (ret == -1 &&
                    (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK))
                    continue;

                if (ret <= 0)   // 0 = peer closed; -1 = fatal connection error
                {
                    if (ret == -1)
                        perror("read");
                    else
                        std::cout<<"client close"<<std::endl;
                    close(connfd);
                    event = events[i];
                    // Remove connfd from the interest list; closing the fd
                    // would also remove it implicitly.
                    epoll_ctl(epollfd, EPOLL_CTL_DEL, connfd, &event);
                    clients.erase(std::remove(clients.begin(), clients.end(),
                                              connfd),
                                  clients.end());
                    continue;
                }

                std::cout<<buf;
                // FIX: echo back exactly the bytes received; strlen(buf)
                // would truncate any payload containing a NUL byte.
                write(connfd, buf, ret);
            }

        }
    }

    return 0;
}

05\echocli.cpp

#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include <iostream>

// Print a perror-style diagnostic for the failing call `m` and terminate
// the process.  Wrapped in do { } while(0) so the macro expands to a
// single statement and composes safely with if/else.
#define ERR_EXIT(m) \
do \
{ \
perror(m); \
exit(EXIT_FAILURE); \
} while(0)

int main(void)
{
    // Plain blocking TCP echo client: connects to 127.0.0.1:5188, then
    // loops sending lines from stdin and printing the server's echo.
    int sock;
    if ((sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0)
        ERR_EXIT("socket");

    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(5188);
    servaddr.sin_addr.s_addr = inet_addr("127.0.0.1");

    if (connect(sock, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0)
        ERR_EXIT("connect");

    // Show the ephemeral local address the kernel picked for this socket.
    struct sockaddr_in localaddr;
    socklen_t addrlen = sizeof(localaddr);
    if (getsockname(sock, (struct sockaddr*)&localaddr, &addrlen) < 0)
        ERR_EXIT("getsockname");

    std::cout<<"ip="<<inet_ntoa(localaddr.sin_addr)<<
        " port="<<ntohs(localaddr.sin_port)<<std::endl;

    char sendbuf[1024] = {0};
    char recvbuf[1024] ={0};
    while (fgets(sendbuf, sizeof(sendbuf), stdin) != NULL)
    {
        write(sock, sendbuf, strlen(sendbuf));

        // FIX: read at most sizeof-1 bytes and NUL-terminate explicitly.
        // The original read(sock, recvbuf, sizeof(recvbuf)) could fill the
        // whole buffer, leaving no terminator for fputs() (out-of-bounds
        // read), and it ignored EOF/errors entirely.
        ssize_t n = read(sock, recvbuf, sizeof(recvbuf) - 1);
        if (n <= 0)
        {
            if (n < 0)
                perror("read");
            break;          // server closed the connection or fatal error
        }
        recvbuf[n] = '\0';

        fputs(recvbuf, stdout);
        memset(sendbuf, 0, sizeof(sendbuf));
        memset(recvbuf, 0, sizeof(recvbuf));
    }

    close(sock);

    return 0;
}

05\CMakeLists.txt

# Build configuration for the chapter-05 echo server/client examples.
cmake_minimum_required(VERSION 2.6)

project(pas CXX)

# Compile with debug symbols and all warnings enabled.
set(CXX_FLAGS -g -Wall)
set(CMAKE_CXX_COMPILER "g++")
# CXX_FLAGS is a ;-separated list; join it into the space-separated
# string that CMAKE_CXX_FLAGS expects.
string(REPLACE ";" " " CMAKE_CXX_FLAGS "${CXX_FLAGS}")

# Place all produced binaries under <build-dir>/bin.
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)

add_executable(echosrv_poll echosrv_poll.cpp)
add_executable(echosrv_epoll echosrv_epoll.cpp)
add_executable(echocli echocli.cpp)

05\build.sh

#!/bin/sh
# Out-of-tree build driver: configures the project with cmake inside
# $BUILD_DIR (default: ../build) and then runs make, forwarding any
# script arguments to make.

set -x

SOURCE_DIR=$(pwd)
BUILD_DIR=${BUILD_DIR:-../build}

# FIX: quote the paths so directories containing spaces don't word-split,
# and use "$@" (not $*) so make arguments keep their original quoting.
mkdir -p "$BUILD_DIR" \
  && cd "$BUILD_DIR" \
  && cmake "$SOURCE_DIR" \
  && make "$@"
  • poll模型:每次调用poll函数的时候,都需要把监听fd和连接fd所感兴趣的事件数组拷贝到内核,数据拷贝是服务器性能杀手之一
  • 而epoll则拷贝一次就行了,你关注的事件的fd都由epollfd来管理了,在内核中已经有数据来维护了,不需要再传递了,而poll每次都要传递进去,所以效率低
  • 测试:
  • (P5)epoll:epoll ,epoll LT ,epoll ET_数据


  • (P5)epoll:epoll ,epoll LT ,epoll ET_TCP_02

3.epoll ET

  • epoll ET图
  • epoll ET模式的EMFILE问题