/* Drain the reactor's logic-message queue for up to `timeout` milliseconds:
   each popped nrmgr_net_msg is handed to `callback`, then a
   RMT_REQUEST_FREENETMSG request is pushed back toward the network side so
   the consumed message can be reclaimed there. */
static void reactor_logicmsg_handle(struct net_reactor* reactor, pfn_nrmgr_logicmsg callback, int64_t timeout)
{
    struct nr_mgr* mgr = reactor->mgr;
    struct rwlist_s* in_list = reactor->logic_msglist;
    struct rwlist_s* out_list = reactor->fromlogic_rwlist;
    int64_t now = ox_getnowtime();
    const int64_t deadline = now + timeout;
    struct rwlist_msg_data free_req;

    /* the request struct is reused for every recycled message */
    free_req.msg_type = RMT_REQUEST_FREENETMSG;

    for (;;)
    {
        struct nrmgr_net_msg** popped =
            (struct nrmgr_net_msg**)ox_rwlist_pop(in_list, deadline - now);
        now = ox_getnowtime();

        if (popped == NULL)
        {
            /* nothing arrived; stop only once the deadline has passed */
            if (now >= deadline)
            {
                break;
            }
            continue;
        }

        (callback)(mgr, *popped);

        /* ask the network thread to free the consumed message */
        free_req.data.free.msg = *popped;
        ox_rwlist_push(out_list, &free_req);
    }
}
/* Poll the epoll instance for up to `timeout` milliseconds, dispatching
   read/write/half-close events to the session attached to each epoll_event.
   Compiles to a no-op on non-Linux platforms. */
static void epollserver_poll(struct server_s* self, int64_t timeout)
{
#ifdef PLATFORM_LINUX
    struct epollserver_s* epollserver = (struct epollserver_s*)self;
    int epollfd = epollserver->epoll_fd;
    struct epoll_event events[MAX_EVENTS];
    int64_t current_time = ox_getnowtime();
    const int64_t end_time = current_time + timeout;
    do
    {
        int i = 0;
        int nfds = 0;
        /* wait only for the time remaining in this poll slice */
        nfds = epoll_wait(epollfd, events, MAX_EVENTS, end_time - current_time);
        if(-1 == nfds)
        {
            if(S_EINTR == sErrno)
            {
                /* interrupted by a signal: retry.
                   NOTE(review): current_time is not refreshed before this
                   continue, so the retried epoll_wait uses a slightly stale
                   remaining-timeout — harmless but worth confirming. */
                continue;
            }
            else
            {
                /* any other epoll error aborts this poll round */
                break;
            }
        }
        for(i = 0; i < nfds; ++i)
        {
            /* data.ptr was registered as the owning session */
            struct session_s* session = (struct session_s*)(events[i].data.ptr);
            uint32_t event_data = events[i].events;
            /* only fully-connected sessions are serviced here */
            if(session->status == session_status_connect)
            {
                if(event_data & EPOLLRDHUP)
                {
                    /* peer shut down its writing end: half-close the session
                       and notify the logic layer */
                    epollserver_halfclose_session(session->server, session);
                    (*self->logic_on_close)(self, session->ud);
                }
                else
                {
                    if(event_data & EPOLLIN)
                    {
                        epoll_recvdata_callback(session);
                    }
                    if(event_data & EPOLLOUT)
                    {
                        epoll_handle_onoutevent(session);
                    }
                }
            }
        }
        current_time = ox_getnowtime();
    }while(end_time > current_time);
#endif
}
void ox_connection_netpoll(struct connection_s* self, int64_t millisecond) { if(millisecond < 0) { millisecond = 0; } /* 网络层调度函数:如果当前状态是未链接则尝试取出消息并链接服务器,否则处理来自逻辑层的消息队列 */ if(self->status == connection_none) { ox_thread_sleep(millisecond); connection_proclist_handle(self); } else { int64_t current_time = ox_getnowtime(); const int64_t end_time = current_time + millisecond; do { if(ox_fdset_poll(self->fdset, end_time-current_time) > 0) { if(ox_fdset_check(self->fdset, self->fd, ReadCheck)) { connection_read_handle(self); } if(self->fd != SOCKET_ERROR && ox_fdset_check(self->fdset, self->fd, WriteCheck)) { self->writable = true; ox_fdset_del(self->fdset, self->fd, WriteCheck); } } if(self->writable) { connection_sendmsg_handle(self); } current_time = ox_getnowtime(); }while(end_time > current_time); connection_proclist_handle(self); } ox_rwlist_flush(self->netmsg_list); connection_free_netmsglist(self); }
/* Register a one-shot timer.
 *
 * callback : handler invoked when the timer fires (must be non-NULL)
 * delay    : milliseconds from now; non-positive values schedule the timer
 *            at (or before) the current time so it fires on the next run
 * arg      : opaque pointer handed back to the callback
 *
 * Returns the timer id, or ERROR_COMPOSITOR_INDEX on failure (NULL callback,
 * or node pool still exhausted after growing).
 */
int ox_timer_mgr_add(struct timer_mgr_s* self, fpn_ox_timer_handler callback, int64_t delay, void* arg)
{
    int id = ERROR_COMPOSITOR_INDEX;
    int* free_id = NULL;
    struct timeaction_node_s* node = NULL;

    if(callback == NULL)
    {
        return id;
    }

    free_id = (int*)ox_stack_popback(self->free_node_ids);
    if(free_id == NULL)
    {
        /* pool exhausted: double the node array and retry once */
        timeaction_mgr_init(self, 2*self->node_num);
        free_id = (int*)ox_stack_popback(self->free_node_ids);
    }

    if(free_id != NULL)
    {
        id = *free_id;
        node = self->node_array+id;
        node->callback = callback;
        node->arg = arg;
        /* Fix: the original branched on (delay > 0) but both arms computed
           now + delay — `now - (-delay)` is algebraically identical, and the
           negation additionally has undefined behavior for INT64_MIN.
           A single addition is equivalent and safe. */
        node->left_time = ox_getnowtime() + delay;
        ox_heap_insert(self->compositor_heap, &(node->index));
    }

    return id;
}
void ox_connection_logicpoll(struct connection_s* self, int64_t millisecond) { /* 逻辑层调度函数:处理来自网络层投递的网络消息,以及释放逻辑层投递出的消息 */ struct rwlist_s* logicmsg_list = self->netmsg_list; struct msg_data_s** msg_p = NULL; struct msg_data_s* msg = NULL; pfn_packet_handle handle = self->handle; void* ext = self->ext; struct rwlist_s* free_netmsg_list = self->free_netmsg_list; int64_t current_time = ox_getnowtime(); int64_t end_time = current_time; if(millisecond < 0) { millisecond = 0; } end_time += millisecond; do { msg_p = (struct msg_data_s**)ox_rwlist_pop(logicmsg_list, end_time-current_time); if(msg_p != NULL) { msg = *msg_p; (handle)(self, msg, ext); ox_rwlist_push(free_netmsg_list, msg_p); current_time = ox_getnowtime(); } else { break; } }while(end_time > current_time); ox_rwlist_flush(self->sendmsg_list); connection_free_sendmsglist(self); }
/* Test driver: create one network reactor, start the listen thread, then run
 * the logic poll loop forever and print receive throughput once a second.
 */
int main()
{
    struct nr_mgr* mgr = ox_create_nrmgr(1, PACKET_LEN*2, s_check);
    /* Fix: ox_getnowtime() returns a 64-bit millisecond timestamp (it is
       stored in int64_t everywhere else in this codebase); keeping it in a
       plain int truncated the value and broke the 1-second interval check. */
    int64_t old = ox_getnowtime();
    ox_thread_new(listen_thread, mgr);

    while(true)
    {
        /* poll logic messages for up to 5 ms per iteration */
        ox_nrmgr_logic_poll(mgr, msg_handle, 5);
        {
            int64_t now = ox_getnowtime();
            if((now - old) >= 1000)
            {
                printf("recv %d K/s \n", totaol_recv/1024);
                old = now;
                totaol_recv = 0;
            }
        }
    }

    return 0;
}
void ox_timer_mgr_schedule(struct timer_mgr_s* self) { int64_t now = ox_getnowtime(); fpn_ox_timer_handler callback = NULL; void* arg = NULL; struct heap_s* compositor_heap = self->compositor_heap; while(true) { int* nodepp = (int*)ox_heap_top(compositor_heap); struct timeaction_node_s* node = NULL; if(nodepp == NULL) { break; } node = self->node_array+*nodepp; callback = node->callback; if(callback == NULL) { ox_heap_pop(compositor_heap); ox_stack_push(self->free_node_ids, &node->index); } else { if(node->left_time > now) { break; } ox_heap_pop(compositor_heap); arg = node->arg; ox_stack_push(self->free_node_ids, &node->index); (callback)(arg); } } }
int main(int argc, char** argv) { if (argc != 2) { fprintf(stderr, "Usage : <listen port> \n"); exit(-1); } int port_num = atoi(argv[1]); std::atomic_llong total_send_len = ATOMIC_VAR_INIT(0); std::atomic_llong total_recv_len = ATOMIC_VAR_INIT(0); std::atomic_llong send_packet_num = ATOMIC_VAR_INIT(0); std::atomic_llong recv_packet_num = ATOMIC_VAR_INIT(0); ox_socket_init(); int total_client_num = 0; /* 用于网络IO线程发送消息给逻辑线程的消息队列,当网络线程的回调函数push消息后,需要wakeup主线程 */ /* 当然,TcpServer的各个回调函数中可以自己处理消息,而不必发送到msgList队列 */ MsgQueue<NetMsg*> msgList; EventLoop mainLoop; TcpService t; t.startListen(port_num, 1024 * 1024, nullptr, nullptr); t.startWorkerThread(1, [&](EventLoop& l){ /*每帧回调函数里强制同步rwlist*/ lockStatistics(); msgList.ForceSyncWrite(); unLockStatistics(); if (msgList.SharedListSize() > 0) { mainLoop.wakeup(); } }); t.setEnterCallback([&](int64_t id, std::string ip){ NetMsg* msg = new NetMsg(NMT_ENTER, id); lockStatistics(); msgList.Push(msg); unLockStatistics(); mainLoop.wakeup(); }); t.setDisconnectCallback([&](int64_t id){ NetMsg* msg = new NetMsg(NMT_CLOSE, id); lockStatistics(); msgList.Push(msg); unLockStatistics(); mainLoop.wakeup(); }); t.setDataCallback([&](int64_t id, const char* buffer, size_t len){ const char* parse_str = buffer; size_t total_proc_len = 0; int left_len = len; while (true) { bool flag = false; if (left_len >= sizeof(sizeof(uint16_t) + sizeof(uint16_t))) { ReadPacket rp(parse_str, left_len); uint16_t packet_len = rp.readINT16(); if (left_len >= packet_len && packet_len >= (sizeof(uint16_t) + sizeof(uint16_t))) { NetMsg* msg = new NetMsg(NMT_RECV_DATA, id); msg->setData(parse_str, packet_len); lockStatistics(); msgList.Push(msg); unLockStatistics(); total_proc_len += packet_len; parse_str += packet_len; left_len -= packet_len; flag = true; } } if (!flag) { break; } } return total_proc_len; }); /* 主线程处理msgList消息队列 */ int64_t lasttime = ox_getnowtime(); int total_count = 0; std::vector<int64_t> sessions; while (true) { mainLoop.loop(10); 
msgList.SyncRead(0); NetMsg* msg = nullptr; while (msgList.ReadListSize() > 0) { bool ret = msgList.PopFront(&msg); if (ret) { if (msg->mType == NMT_ENTER) { printf("client %lld enter \n", msg->mID); total_client_num++; sessions.push_back(msg->mID); } else if (msg->mType == NMT_CLOSE) { printf("client %lld close \n", msg->mID); for (auto it = sessions.begin(); it != sessions.end(); ++it) { if (*it == msg->mID) { sessions.erase(it); break; } } total_client_num--; } else if (msg->mType == NMT_RECV_DATA) { DataSocket::PACKET_PTR packet = DataSocket::makePacket(msg->mData.c_str(), msg->mData.size()); recv_packet_num++; total_recv_len += msg->mData.size(); for (size_t i = 0; i < sessions.size(); ++i) { t.send(sessions[i], packet); send_packet_num++; total_send_len += msg->mData.size(); } } else { assert(false); } delete msg; msg = nullptr; } else { break; } } int64_t now = ox_getnowtime(); if ((now - lasttime) >= 1000) { std::cout << "clientnum:" << total_client_num << ", recv" << (total_recv_len / 1024) << " K/s, " << "num : " << recv_packet_num << ", send " << (total_send_len / 1024) / 1024 << " M/s, " << " num: " << send_packet_num << std::endl; lasttime = now; total_recv_len = 0; total_send_len = 0; recv_packet_num = 0; send_packet_num = 0; } } t.closeService(); }