The code that uses kqueue
bool append_connection_to_reactor(Connection * connection)
{
    struct kevent event[2];
    struct timespec wait_timeout = { 0, 0 };
    /* register read events (enabled) and write events (added but disabled for now) */
    EV_SET(&event[0], connection->get_socket(), EVFILT_READ,  EV_ADD | EV_ENABLE,  0, 0, connection);
    EV_SET(&event[1], connection->get_socket(), EVFILT_WRITE, EV_ADD | EV_DISABLE, 0, 0, connection);
    if (-1 == kevent(m_reactor, event, sizeof(event) / sizeof(event[0]), nullptr, 0, &wait_timeout))
    {
        return(false);
    }
    return(true);
}
bool delete_connection_from_reactor(Connection * connection)
{
    struct kevent event[2];
    struct timespec wait_timeout = { 0, 0 };
    /* remove both filters for this socket */
    EV_SET(&event[0], connection->get_socket(), EVFILT_READ,  EV_DELETE, 0, 0, connection);
    EV_SET(&event[1], connection->get_socket(), EVFILT_WRITE, EV_DELETE, 0, 0, connection);
    if (-1 == kevent(m_reactor, event, sizeof(event) / sizeof(event[0]), nullptr, 0, &wait_timeout))
    {
        return(false);
    }
    return(true);
}
bool modify_connection_of_reactor(Connection * connection, bool need_send, bool need_recv)
{
    struct kevent event[2];
    struct timespec wait_timeout = { 0, 0 };
    /* toggle the previously added read/write filters on or off */
    EV_SET(&event[0], connection->get_socket(), EVFILT_READ,  (need_recv ? EV_ENABLE : EV_DISABLE), 0, 0, connection);
    EV_SET(&event[1], connection->get_socket(), EVFILT_WRITE, (need_send ? EV_ENABLE : EV_DISABLE), 0, 0, connection);
    if (-1 == kevent(m_reactor, event, sizeof(event) / sizeof(event[0]), nullptr, 0, &wait_timeout))
    {
        return(false);
    }
    return(true);
}
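A side note on the error handling in the three helpers above (an observation about the kevent API in general, not code from the original project): with a null eventlist, a failure to register an individual change only shows up as an overall -1 return. kqueue can report a per-change result if you set EV_RECEIPT on each change and collect the receipts. This is a minimal hedged sketch of that pattern; it reuses the Connection interface and the m_reactor descriptor assumed from the snippets above.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <cstdio>

/* Sketch only: register a connection and check each change individually via EV_RECEIPT. */
bool append_connection_checked(Connection * connection)
{
    struct kevent changes[2];
    struct kevent receipts[2];
    struct timespec immediate = { 0, 0 };
    EV_SET(&changes[0], connection->get_socket(), EVFILT_READ,  EV_ADD | EV_ENABLE  | EV_RECEIPT, 0, 0, connection);
    EV_SET(&changes[1], connection->get_socket(), EVFILT_WRITE, EV_ADD | EV_DISABLE | EV_RECEIPT, 0, 0, connection);
    int n = kevent(m_reactor, changes, 2, receipts, 2, &immediate);
    if (-1 == n)
    {
        return false;
    }
    for (int i = 0; i < n; ++i)
    {
        /* with EV_RECEIPT each receipt has EV_ERROR set and the errno (or 0 on success) in data */
        if ((receipts[i].flags & EV_ERROR) && receipts[i].data != 0)
        {
            printf("kevent change %d failed: %ld\n", i, (long)receipts[i].data);
            return false;
        }
    }
    return true;
}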
void reactor_connection_process(int fkqueue, Connection * listener)
{
    const size_t max_event_count = 256;
    struct kevent connection_events[max_event_count];
    struct timespec wait_timeout = { 1, 0 }; // seconds

    while (running())
    {
        int event_count = kevent(fkqueue, nullptr, 0, connection_events, max_event_count, &wait_timeout);
        if (-1 == event_count)
        {
            if (-1 == fkqueue)
            {
                break;
            }
            if (is_net_blocking_error())
            {
                continue;
            }
            else
            {
                printf("kevent failed: %d\n", net_error());
                break;
            }
        }
        else if (0 == event_count)
        {
            continue;
        }

        for (int index = 0; index < event_count; ++index)
        {
            struct kevent & connection_event = connection_events[index];
            Connection * connection = reinterpret_cast<Connection *>(connection_event.udata);
            if (listener == connection) /* notify to accept */
            {
                Connection * new_connection = do_accept(listener);
                append_connection_to_reactor(new_connection);
            }
            else
            {
                if (EVFILT_READ == connection_event.filter) /* notify to recv */
                {
                    do_recv(connection);
                }
                else if (EVFILT_WRITE == connection_event.filter) /* notify to send */
                {
                    do_send(connection);
                    if (send_buffer_is_empty()) /* nothing left to send */
                    {
                        bool need_send = false;
                        bool need_recv = true;
                        modify_connection_of_reactor(connection, need_send, need_recv);
                    }
                }
            }
        }
    }
}
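For completeness, this is roughly how the pieces above would be tied together: create the kqueue descriptor, register the listening connection, then run the loop. It is a minimal sketch rather than the original initialization code; create_listener_connection() and run_reactor() are hypothetical names, and m_reactor is shown as a local int even though in the snippets above it appears to be a class member.

#include <sys/event.h>
#include <unistd.h>

/* Sketch only: kqueue(2), kevent(2) and close(2) are real system calls;
   create_listener_connection() is a hypothetical helper (socket + bind + listen, non-blocking). */
void run_reactor()
{
    int m_reactor = kqueue();               /* the descriptor later passed as fkqueue */
    if (-1 == m_reactor)
    {
        return;                              /* handle error */
    }

    Connection * listener = create_listener_connection();
    append_connection_to_reactor(listener);  /* EVFILT_READ fires when a client is waiting to be accepted */

    reactor_connection_process(m_reactor, listener);

    close(m_reactor);                        /* closing the kqueue drops all of its registrations */
}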
I wrote a snowball-style test program (each message grows larger and larger). With the maximum message size at 30 * 20 bytes, everything works fine.
When the maximum message size is raised to 100 * 20 bytes, the machine immediately reboots on its own (regardless of whether it is the requesting side or the responding side); according to the boot report, the kernel panicked.
I tested on two OS X 10.11 machines and got the same result: as soon as the data size is set to 100 * 20 bytes, the machine reboots outright.
Has anyone run into a similar situation?
I'm fairly confident the rest of the code is fine, because it was originally implemented with epoll; porting it to OS X only meant replacing the epoll-related calls with the kevent-related ones.
In other words, only the code above changed; the remaining code should be fine.
I'm not especially familiar with how kqueue is meant to be used. As far as I understand it should work much like epoll, but after hitting this problem I'm no longer so sure.
Feel free to point out anywhere I'm using kqueue incorrectly; for comparison, a sketch of the presumed epoll original follows below.
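For comparison, the epoll version that the kqueue code above was presumably translated from would look roughly like this. This is a hypothetical reconstruction, not the author's original Linux code; the main semantic difference is that epoll keeps a single event mask per fd, while kqueue keeps a separate knote per (fd, filter) pair.

#include <sys/epoll.h>

/* Hypothetical epoll counterpart of append_connection_to_reactor, for comparison only. */
bool append_connection_to_epoll(int epoll_fd, Connection * connection)
{
    struct epoll_event event;
    event.events = EPOLLIN;           /* read enabled; write interest not requested yet */
    event.data.ptr = connection;      /* same role as kevent's udata */
    if (-1 == epoll_ctl(epoll_fd, EPOLL_CTL_ADD, connection->get_socket(), &event))
    {
        return false;
    }
    return true;
}

/* Enabling or disabling write interest later would be epoll_ctl(EPOLL_CTL_MOD, ...) with the full
   mask, whereas the kqueue code toggles the EVFILT_WRITE knote with EV_ENABLE / EV_DISABLE. */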
Solution
Is there gdb on OS X?