static void client_decref(struct client_info *ci)
{
    if (ci && __sync_sub_and_fetch(&ci->refcnt, 1) == 0) {
        destroy_client(ci);
        return;
    }
}
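/*
 * Editorial sketch (not from the original source): client_decref() above
 * only balances out if every holder of a client_info first takes a
 * reference. A matching increment helper, assuming the same refcnt field
 * and GCC __sync builtins, would look roughly like this hypothetical
 * client_incref():
 */
static void client_incref(struct client_info *ci)
{
    if (ci)
        __sync_add_and_fetch(&ci->refcnt, 1);   /* atomic refcnt++ */
}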
static void cleanup(void)
{
    while (client)
        destroy_client(client);
    ixp_server_close(&srv);
    close(sleeperfd);
}
static void register_client(client_t *c, srs_req_register_t *req)
{
    static srs_client_ops_t ops = {
        .notify_focus   = focus_notify,
        .notify_command = command_notify,
        .notify_render  = voice_notify,
    };

    srs_context_t *srs = c->s->self->srs;
    char *name = req->name;
    char *appcls = req->appclass;
    char **cmds = req->commands;
    int ncmd = req->ncommand;
    char id[64];

    snprintf(id, sizeof(id), "native-client-%d", c->id);

    mrp_debug("received register request from native client #%d", c->id);

    c->c = client_create(srs, SRS_CLIENT_TYPE_EXTERNAL, name, appcls,
                         cmds, ncmd, id, &ops, c);

    if (c->c != NULL)
        reply_register(c, req->reqno, SRS_STATUS_OK, "OK");
    else {
        reply_register(c, req->reqno, SRS_STATUS_FAILED, "failed");
        destroy_client(c);
    }
}
//---------------------------------------------------------------------------//
void CAddInNative::Done()
{
    destroy_client();
    IAddInDefBaseEx* cnn = (IAddInDefBaseEx*)m_iConnect;
    IMsgBox* imsgbox = (IMsgBox*)cnn->GetInterface(eIMsgBox);
    imsgbox->Alert(L"Спасибо!!!");  // "Thank you!!!"
}
static bool_t svc_auth_gssapi_destroy(SVCAUTH *auth)
{
    svc_auth_gssapi_data *client_data = SVCAUTH_PRIVATE(auth);

    destroy_client(client_data);
    return TRUE;
}
static void unregister_client(client_t *c, srs_req_unregister_t *req)
{
    mrp_debug("received unregister request from native client #%d", c->id);

    reply_unregister(c, req->reqno, SRS_STATUS_OK, "OK");
    destroy_client(c);
}
void destroy_tcp_client ( struct client *client )
{
    int status;

    if ( CASDEBUG > 0 ) {
        errlogPrintf ( "CAS: Connection %d Terminated\n", client->sock );
    }

    if ( client->evuser ) {
        /*
         * turn off extra labor callbacks from the event thread
         */
        status = db_add_extra_labor_event ( client->evuser, NULL, NULL );
        assert ( ! status );

        /*
         * wait for extra labor in progress to complete
         */
        db_flush_extra_labor_event ( client->evuser );
    }

    destroyAllChannels ( client, & client->chanList );
    destroyAllChannels ( client, & client->chanPendingUpdateARList );

    if ( client->evuser ) {
        db_close_events (client->evuser);
    }

    destroy_client ( client );
}
static void clean_client(void)
{
    svc_auth_gssapi_data *client_data;
    client_list *c;

    PRINTF(("clean_client: starting\n"));

    c = clients;
    while (c) {
        client_data = c->client;
        L_PRINTF(2, ("clean_client: client_data = %p\n",
                     (void *) client_data));

        if (client_data->expiration < time(0)) {
            PRINTF(("clean_client: client %d expired\n",
                    client_data->key));
            destroy_client(client_data);
            c = clients;    /* start over, just to be safe */
        } else {
            c = c->next;
        }
    }

    PRINTF(("clean_client: done\n"));
}
int main(int argc, char *argv[])
{
    client_t *c;

    c = create_client(argv[0]);

    if (c == NULL) {
        fprintf(stderr, "Failed to create client.\n");
        exit(1);
    }

    parse_cmdline(c, argc, &argv[0]);

    create_mainloop(c);
    setup_signals(c);
    setup_input(c);

    if (c->glib)
        print(c, "Using GMainLoop...");
    else
        print(c, "Using pa_mainloop...");

    run_mainloop(c);

    cleanup_input(c);
    destroy_client(c);

    return 0;
}
int cliser_server_client_close(lua_State *L)
{
    server_client_t *server_client = (server_client_t *)lua_touserdata(L, 1);
    if (server_client->client == NULL)
        return LUA_HANDLE_ERROR_STR(L, "server client is invalid, either closed or used outside of server function scope");
    remove_client(server_client->server, server_client->client);
    destroy_client(L, server_client->client);
    server_client->server = NULL;
    server_client->client = NULL;
    return 0;
}
int cliser_client_close(lua_State *L)
{
    client_t **client = (client_t **)lua_touserdata(L, 1);
    if (*client) {
        (*client)->ref_count--;
        if ((*client)->ref_count == 0) {
            destroy_client(L, *client);
        }
        (*client) = NULL;
    }
    return 0;
}
nvqrReturn_t nvqr_disconnect(NVQRConnection *connection)
{
    if (disconnect_from_server(*connection)) {
        close_client_connection(*connection);
        destroy_client(*connection);
        close_server_connection(connection->server_handle);
        free(connection->process_name);
        return NVQR_SUCCESS;
    }
    return NVQR_ERROR_UNKNOWN;
}
nvqrReturn_t nvqr_connect(NVQRConnection *connection, pid_t pid)
{
    memset(connection, 0, sizeof(*connection));
    connection->pid = pid;
    connection->process_name = process_name_from_pid(pid);

    if (!create_client(connection)) {
        return NVQR_ERROR_UNKNOWN;
    }

    if (!open_server_connection(&(connection->server_handle), pid)) {
        destroy_client(*connection);
        return NVQR_ERROR_NOT_SUPPORTED;
    }

    if (!connect_to_server(connection)) {
        destroy_client(*connection);
        close_server_connection(connection->server_handle);
        return NVQR_ERROR_UNKNOWN;
    }

    return NVQR_SUCCESS;
}
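/*
 * Hedged usage sketch for the nvqr_connect()/nvqr_disconnect() pair above.
 * query_process() and its error handling are hypothetical; only the
 * connect/disconnect calls and the NVQR_* return codes come from the
 * snippets shown here.
 */
static int query_process(pid_t pid)
{
    NVQRConnection conn;

    if (nvqr_connect(&conn, pid) != NVQR_SUCCESS)
        return -1;                      /* could not attach to the target */

    /* ... issue queries over 'conn' here ... */

    if (nvqr_disconnect(&conn) != NVQR_SUCCESS)
        return -1;                      /* teardown failed */

    return 0;
}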
Client* manage_client(Window win) {
    if (get_client_from_window(win))
        return NULL;
    // init client
    Client* client = create_client();
    Monitor* m = get_current_monitor();
    // set to window properties
    client->window = win;
    client_update_title(client);
    // apply rules
    int manage = 1;
    rules_apply(client, &manage);
    if (!manage) {
        destroy_client(client);
        // map it... just to be sure
        XMapWindow(gDisplay, win);
        return NULL;
    }
    unsigned int border, depth;
    Window root_win;
    int x, y;
    unsigned int w, h;
    XGetGeometry(gDisplay, win, &root_win, &x, &y, &w, &h, &border, &depth);
    // treat wanted coordinates as floating coords
    XRectangle size = client->float_size;
    size.width = w;
    size.height = h;
    size.x = m->rect.x + m->rect.width/2 - size.width/2;
    size.y = m->rect.y + m->rect.height/2 - size.height/2 + bar_height;
    client->float_size = size;
    client->last_size = size;
    XMoveResizeWindow(gDisplay, client->window, size.x, size.y, size.width, size.height);
    // actually manage it
    g_array_append_val(g_clients, client);
    XSetWindowBorderWidth(gDisplay, win, window_border_width);
    // insert to layout
    if (!client->tag)
        client->tag = m->tag;
    // get events from window
    client_update_wm_hints(client);
    XSelectInput(gDisplay, win, CLIENT_EVENT_MASK);
    window_grab_button(win);
    frame_insert_window(client->tag->frame, win);
    monitor_apply_layout(find_monitor_with_tag(client->tag));
    return client;
}
static void closed_evt(mrp_transport_t *t, int error, void *user_data)
{
    client_t *c = (client_t *)user_data;

    MRP_UNUSED(t);

    if (error != 0)
        mrp_log_error("Native client connection closed with error %d (%s).",
                      error, strerror(error));
    else
        mrp_log_info("Native client connection closed.");

    destroy_client(c);
}
static void cleanup(void)
{
    client_list *c, *c2;

    PRINTF(("cleanup_and_exit: starting\n"));

    c = clients;
    while (c) {
        c2 = c;
        c = c->next;
        destroy_client(c2->client);
        free(c2);
    }

    exit(0);
}
static int destroy_server(lua_State *L, server_t *server)
{
    if (server->sock) {
        int ret = close(server->sock);
        if (ret)
            return LUA_HANDLE_ERROR(L, errno);
        server->sock = 0;
    }
    client_t *client = server->clients;
    while (client) {
        client_t *next = client->next;
        destroy_client(L, client);
        client = next;
    }
    server->clients = NULL;
    server->num_clients = 0;
    destroy_copy_context(&server->copy_context);
    return 0;
}
void destro_delete(destro_t *ctx, destro_client_t *client)
{
    if (!client) {
        ctx->destroyed = true;
        if (!ctx->in_callback) {
            DBG(ctx, "Destroying main directly");
            destroy_main(ctx);
        } else {
            DBG(ctx, "Adding main to delayed destruction");
            ctx->destroyed = true;
        }
    } else {
        client->destroyed = true;
        if (!ctx->in_callback) {
            DBG(ctx, "Destroying client %p directly", client);
            destroy_client(ctx, client);
        } else {
            DBG(ctx, "Adding %p to delayed removal", client);
            if (!ctx->deleted_tail) {
                assert(!ctx->deleted_head);
                ctx->deleted_head = ctx->deleted_tail = client;
            } else {
                ctx->deleted_tail->next = client;
                ctx->deleted_tail = client;
            }
            if (ctx->cbs.close && !client->closed) {
                ctx->cbs.close(ctx->caller_ctx, (void *)client);
            }
            client->closed = true;
        }
    }
}
void destro_cb_exit(destro_t *ctx)
{
    ctx->in_callback = false;

    while (ctx->deleted_head) {
        destro_client_t *client = ctx->deleted_head;
        DBG(ctx, "Delayed removing %p", client);
        ctx->deleted_head = client->next;
        destroy_client(ctx, client);
    }
    ctx->deleted_head = ctx->deleted_tail = NULL;

    if (ctx->destroyed) {
        DBG(ctx, "Delayed destroying main context");
        destroy_main(ctx);
    }
}
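/*
 * Illustrative sketch (not part of the original source) of how the
 * destro_delete()/destro_cb_exit() pair is meant to be driven: the
 * dispatcher sets ctx->in_callback before running a user callback, so any
 * destro_delete() issued from inside it is only queued, and
 * destro_cb_exit() then performs the delayed teardown. dispatch_one() and
 * the callback signature are hypothetical.
 */
static void dispatch_one(destro_t *ctx, destro_client_t *client,
                         void (*cb)(destro_t *, destro_client_t *))
{
    ctx->in_callback = true;    /* destruction requests are deferred from here on */
    cb(ctx, client);
    destro_cb_exit(ctx);        /* clears in_callback, flushes the deleted list,
                                 * and destroys the main context if flagged */
}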
void remove_client(int fd)
{
    remove_from_el(&server.el, fd);
    destroy_client(server.clients[fd]);
    server.clients[fd] = NULL;
}
/* * Keep-alive test. */ static int keep_alive_test(pj_stun_config *cfg) { struct stun_srv *srv; struct stun_client *client; pj_sockaddr_in mapped_addr; pj_stun_sock_info info; pj_str_t srv_addr; pj_time_val timeout, t; int i, ret = 0; pj_status_t status; PJ_LOG(3,(THIS_FILE, " normal operation")); status = create_client(cfg, &client, PJ_TRUE); if (status != PJ_SUCCESS) return -310; status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN|WITH_XOR_MAPPED, &srv); if (status != PJ_SUCCESS) { destroy_client(client); return -320; } /* * Part 1: initial Binding resolution. */ PJ_LOG(3,(THIS_FILE, " initial Binding request")); srv_addr = pj_str("127.0.0.1"); status = pj_stun_sock_start(client->sock, &srv_addr, pj_ntohs(srv->addr.ipv4.sin_port), NULL); if (status != PJ_SUCCESS) { destroy_server(srv); destroy_client(client); return -330; } /* Wait until on_status() callback is called with success status */ pj_gettimeofday(&timeout); timeout.sec += 60; do { handle_events(cfg, 100); pj_gettimeofday(&t); } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout)); /* Check that callback with correct operation is called */ if (client->last_op != PJ_STUN_SOCK_BINDING_OP) { PJ_LOG(3,(THIS_FILE, " error: expecting Binding operation status")); ret = -340; goto on_return; } if (client->last_status != PJ_SUCCESS) { PJ_LOG(3,(THIS_FILE, " error: expecting PJ_SUCCESS status")); ret = -350; goto on_return; } /* Check that client doesn't receive anything */ if (client->on_rx_data_cnt != 0) { PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything")); ret = -370; goto on_return; } /* Get info */ pj_bzero(&info, sizeof(info)); pj_stun_sock_get_info(client->sock, &info); /* Check that we have server address */ if (!pj_sockaddr_has_addr(&info.srv_addr)) { PJ_LOG(3,(THIS_FILE, " error: missing server address")); ret = -380; goto on_return; } /* .. and bound address port must not be zero */ if (pj_sockaddr_get_port(&info.bound_addr)==0) { PJ_LOG(3,(THIS_FILE, " error: bound address is zero")); ret = -381; goto on_return; } /* .. and mapped address */ if (!pj_sockaddr_has_addr(&info.mapped_addr)) { PJ_LOG(3,(THIS_FILE, " error: missing mapped address")); ret = -382; goto on_return; } /* verify the mapped address */ pj_sockaddr_in_init(&mapped_addr, &srv->ip_to_send, srv->port_to_send); if (pj_sockaddr_cmp(&info.mapped_addr, &mapped_addr) != 0) { PJ_LOG(3,(THIS_FILE, " error: mapped address mismatched")); ret = -383; goto on_return; } /* .. and at least one alias */ if (info.alias_cnt == 0) { PJ_LOG(3,(THIS_FILE, " error: must have at least one alias")); ret = -384; goto on_return; } if (!pj_sockaddr_has_addr(&info.aliases[0])) { PJ_LOG(3,(THIS_FILE, " error: missing alias")); ret = -386; goto on_return; } /* * Part 2: sending and receiving data */ PJ_LOG(3,(THIS_FILE, " sending/receiving data")); /* Change server operation mode to echo back data */ srv->flag = ECHO; /* Reset server */ srv->rx_cnt = 0; /* Client sending data to echo server */ { char txt[100]; PJ_LOG(3,(THIS_FILE, " sending to %s", pj_sockaddr_print(&info.srv_addr, txt, sizeof(txt), 3))); } status = pj_stun_sock_sendto(client->sock, NULL, &ret, sizeof(ret), 0, &info.srv_addr, pj_sockaddr_get_len(&info.srv_addr)); if (status != PJ_SUCCESS && status != PJ_EPENDING) { app_perror(" error: server sending data", status); ret = -390; goto on_return; } /* Wait for a short period until client receives data. We can't wait for * too long otherwise the keep-alive will kick in. 
*/ pj_gettimeofday(&timeout); timeout.sec += 1; do { handle_events(cfg, 100); pj_gettimeofday(&t); } while (client->on_rx_data_cnt==0 && PJ_TIME_VAL_LT(t, timeout)); /* Check that data is received in server */ if (srv->rx_cnt == 0) { PJ_LOG(3,(THIS_FILE, " error: server didn't receive data")); ret = -395; goto on_return; } /* Check that status is still OK */ if (client->last_status != PJ_SUCCESS) { app_perror(" error: client has failed", client->last_status); ret = -400; goto on_return; } /* Check that data has been received */ if (client->on_rx_data_cnt == 0) { PJ_LOG(3,(THIS_FILE, " error: client doesn't receive data")); ret = -410; goto on_return; } /* * Part 3: Successful keep-alive, */ PJ_LOG(3,(THIS_FILE, " successful keep-alive scenario")); /* Change server operation mode to normal mode */ srv->flag = RESPOND_STUN | WITH_XOR_MAPPED; /* Reset server */ srv->rx_cnt = 0; /* Reset client */ client->on_status_cnt = 0; client->last_status = PJ_SUCCESS; client->on_rx_data_cnt = 0; /* Wait for keep-alive duration to see if client actually sends the * keep-alive. */ pj_gettimeofday(&timeout); timeout.sec += (PJ_STUN_KEEP_ALIVE_SEC + 1); do { handle_events(cfg, 100); pj_gettimeofday(&t); } while (PJ_TIME_VAL_LT(t, timeout)); /* Check that server receives some packets */ if (srv->rx_cnt == 0) { PJ_LOG(3, (THIS_FILE, " error: no keep-alive was received")); ret = -420; goto on_return; } /* Check that client status is still okay and on_status() callback is NOT * called */ /* No longer valid due to this ticket: * http://trac.pjsip.org/repos/ticket/742 if (client->on_status_cnt != 0) { PJ_LOG(3, (THIS_FILE, " error: on_status() must not be called on successful" "keep-alive when mapped-address does not change")); ret = -430; goto on_return; } */ /* Check that client doesn't receive anything */ if (client->on_rx_data_cnt != 0) { PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything")); ret = -440; goto on_return; } /* * Part 4: Successful keep-alive with IP address change */ PJ_LOG(3,(THIS_FILE, " mapped IP address change")); /* Change server operation mode to normal mode */ srv->flag = RESPOND_STUN | WITH_XOR_MAPPED; /* Change mapped address in the response */ srv->ip_to_send = pj_str("2.2.2.2"); srv->port_to_send++; /* Reset server */ srv->rx_cnt = 0; /* Reset client */ client->on_status_cnt = 0; client->last_status = PJ_SUCCESS; client->on_rx_data_cnt = 0; /* Wait for keep-alive duration to see if client actually sends the * keep-alive. 
*/ pj_gettimeofday(&timeout); timeout.sec += (PJ_STUN_KEEP_ALIVE_SEC + 1); do { handle_events(cfg, 100); pj_gettimeofday(&t); } while (PJ_TIME_VAL_LT(t, timeout)); /* Check that server receives some packets */ if (srv->rx_cnt == 0) { PJ_LOG(3, (THIS_FILE, " error: no keep-alive was received")); ret = -450; goto on_return; } /* Check that on_status() callback is called (because mapped address * has changed) */ if (client->on_status_cnt != 1) { PJ_LOG(3, (THIS_FILE, " error: on_status() was not called")); ret = -460; goto on_return; } /* Check that callback was called with correct operation */ if (client->last_op != PJ_STUN_SOCK_MAPPED_ADDR_CHANGE) { PJ_LOG(3,(THIS_FILE, " error: expecting keep-alive operation status")); ret = -470; goto on_return; } /* Check that last status is still success */ if (client->last_status != PJ_SUCCESS) { PJ_LOG(3, (THIS_FILE, " error: expecting successful status")); ret = -480; goto on_return; } /* Check that client doesn't receive anything */ if (client->on_rx_data_cnt != 0) { PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything")); ret = -490; goto on_return; } /* Get info */ pj_bzero(&info, sizeof(info)); pj_stun_sock_get_info(client->sock, &info); /* Check that we have server address */ if (!pj_sockaddr_has_addr(&info.srv_addr)) { PJ_LOG(3,(THIS_FILE, " error: missing server address")); ret = -500; goto on_return; } /* .. and mapped address */ if (!pj_sockaddr_has_addr(&info.mapped_addr)) { PJ_LOG(3,(THIS_FILE, " error: missing mapped address")); ret = -510; goto on_return; } /* verify the mapped address */ pj_sockaddr_in_init(&mapped_addr, &srv->ip_to_send, srv->port_to_send); if (pj_sockaddr_cmp(&info.mapped_addr, &mapped_addr) != 0) { PJ_LOG(3,(THIS_FILE, " error: mapped address mismatched")); ret = -520; goto on_return; } /* .. and at least one alias */ if (info.alias_cnt == 0) { PJ_LOG(3,(THIS_FILE, " error: must have at least one alias")); ret = -530; goto on_return; } if (!pj_sockaddr_has_addr(&info.aliases[0])) { PJ_LOG(3,(THIS_FILE, " error: missing alias")); ret = -540; goto on_return; } /* * Part 5: Failed keep-alive */ PJ_LOG(3,(THIS_FILE, " failed keep-alive scenario")); /* Change server operation mode to respond without attribute */ srv->flag = RESPOND_STUN; /* Reset server */ srv->rx_cnt = 0; /* Reset client */ client->on_status_cnt = 0; client->last_status = PJ_SUCCESS; client->on_rx_data_cnt = 0; /* Wait until on_status() is called with failure. */ pj_gettimeofday(&timeout); timeout.sec += (PJ_STUN_KEEP_ALIVE_SEC + PJ_STUN_TIMEOUT_VALUE + 5); do { handle_events(cfg, 100); pj_gettimeofday(&t); } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout)); /* Check that callback with correct operation is called */ if (client->last_op != PJ_STUN_SOCK_KEEP_ALIVE_OP) { PJ_LOG(3,(THIS_FILE, " error: expecting keep-alive operation status")); ret = -600; goto on_return; } if (client->last_status == PJ_SUCCESS) { PJ_LOG(3,(THIS_FILE, " error: expecting failed keep-alive")); ret = -610; goto on_return; } /* Check that client doesn't receive anything */ if (client->on_rx_data_cnt != 0) { PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything")); ret = -620; goto on_return; } on_return: destroy_server(srv); destroy_client(client); for (i=0; i<7; ++i) handle_events(cfg, 50); return ret; }
/*
 * Invalid response scenario: when server returns no MAPPED-ADDRESS or
 * XOR-MAPPED-ADDRESS attribute.
 */
static int missing_attr_test(pj_stun_config *cfg, pj_bool_t destroy_on_err)
{
    struct stun_srv *srv;
    struct stun_client *client;
    pj_str_t srv_addr;
    pj_time_val timeout, t;
    int i, ret = 0;
    pj_status_t status;

    PJ_LOG(3,(THIS_FILE, " missing attribute test [%d]", destroy_on_err));

    status = create_client(cfg, &client, destroy_on_err);
    if (status != PJ_SUCCESS)
        return -110;

    status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN, &srv);
    if (status != PJ_SUCCESS) {
        destroy_client(client);
        return -120;
    }

    srv_addr = pj_str("127.0.0.1");
    status = pj_stun_sock_start(client->sock, &srv_addr,
                                pj_ntohs(srv->addr.ipv4.sin_port), NULL);
    if (status != PJ_SUCCESS) {
        destroy_server(srv);
        destroy_client(client);
        return -130;
    }

    /* Wait until on_status() callback is called with the failure */
    pj_gettimeofday(&timeout);
    timeout.sec += 60;
    do {
        handle_events(cfg, 100);
        pj_gettimeofday(&t);
    } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout));

    /* Check that callback with correct operation is called */
    if (client->last_op != PJ_STUN_SOCK_BINDING_OP) {
        PJ_LOG(3,(THIS_FILE, " error: expecting Binding operation status"));
        ret = -140;
        goto on_return;
    }

    if (client->last_status != PJNATH_ESTUNNOMAPPEDADDR) {
        PJ_LOG(3,(THIS_FILE, " error: expecting PJNATH_ESTUNNOMAPPEDADDR"));
        ret = -150;
        goto on_return;
    }

    /* Check that client doesn't receive anything */
    if (client->on_rx_data_cnt != 0) {
        PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
        ret = -170;
        goto on_return;
    }

on_return:
    destroy_server(srv);
    destroy_client(client);
    for (i=0; i<7; ++i)
        handle_events(cfg, 50);
    return ret;
}
/**
 * User thread responsible for handling individual clients
 * @param arg Structure to use to process the thread
 */
void *user_thread( void *arg )
{
    User user = new_user( ((Environment)arg)->client );
    Management manager = ((Environment)arg)->manager;

    // increase thread count
    manager_up_thread( manager, pthread_self() );

    // lock user for now
    user_lock( user );

    // add the user to the list of users
    manager_add_user( manager, user );

    // turn client on
    client_ok( user->client );

    // ready user for commo
    user_unlock( user );

    /* ---------------------------------------------
     * Login screen
     */
    Room login_room = room_get( manager->rooms, 0 );
    if( login_room == NULL ){
        printf("Error getting login room\n");
        //return 0;
    }
    char_set_room( user->parent->character, login_room );

    // have a look
    action_look( NULL, user, manager );

    // client_prompt( )
    // accept input

    // initialize the receive buffer (check the allocation before zeroing it)
    int buff_len = 256;
    char *buff = (char *) malloc( buff_len );
    if( buff == NULL ){
        perror("client thread recv buffer malloc");
        return NULL;
    }
    memset( buff, 0, 256 );

    // user response loop
    while( manager->cease != 1 && (user->flags & USER_FLAG_EXIT) == 0 ){
        // receive response from client
        int length = client_recv( user->client, &buff, &buff_len );

        // handle response
        if( buff != NULL && length > 0 ){
            // parse the response for actionable text
            action_parse_response( buff,       // buffer with response
                                   length,     // length of response
                                   user,       // user making response
                                   manager );  // manager
            // reset response
            memset(buff,0,length);
        } else {
            // nothing going on, yield
            sched_yield();
        }
    }

    free( buff );

    // remove from manager list
    manager_remove_user( manager, user );

    // lock user for this
    user_lock( user );

    // close and destroy all structs
    // manually manage this part
    close_client( user->client );
    destroy_client( user->client );

    // user gone from manager so safe to unlock ahead
    user_unlock( user );

    // now destroy
    destroy_user( user );

    free( ((Environment)arg) );

    // decrease thread count
    manager_down_thread( manager, pthread_self() );

    pthread_exit( 0 );
    return NULL;
}
/*
 * create_tcp_client ()
 */
struct client *create_tcp_client ( SOCKET sock )
{
    int status;
    struct client *client;
    int intTrue = TRUE;
    osiSocklen_t addrSize;
    unsigned priorityOfEvents;

    /* socket passed in is destroyed here if unsuccessful */
    client = create_client ( sock, IPPROTO_TCP );
    if ( ! client ) {
        return NULL;
    }

    /*
     * see TCP(4P) this seems to make unsolicited single events much
     * faster. I take care of queue up as load increases.
     */
    status = setsockopt ( sock, IPPROTO_TCP, TCP_NODELAY,
                          (char *) &intTrue, sizeof (intTrue) );
    if (status < 0) {
        errlogPrintf ( "CAS: TCP_NODELAY option set failed\n" );
        destroy_client ( client );
        return NULL;
    }

    /*
     * turn on KEEPALIVE so if the client crashes
     * this task will find out and exit
     */
    status = setsockopt ( sock, SOL_SOCKET, SO_KEEPALIVE,
                          (char *) &intTrue, sizeof (intTrue) );
    if ( status < 0 ) {
        errlogPrintf ( "CAS: SO_KEEPALIVE option set failed\n" );
        destroy_client ( client );
        return NULL;
    }

    /*
     * some concern that vxWorks will run out of mBuf's
     * if this change is made
     *
     * joh 11-10-98
     */
#if 0
    /*
     * set TCP buffer sizes to be synergistic
     * with CA internal buffering
     */
    i = MAX_MSG_SIZE;
    status = setsockopt ( sock, SOL_SOCKET, SO_SNDBUF,
                          (char *) &i, sizeof (i) );
    if (status < 0) {
        errlogPrintf ( "CAS: SO_SNDBUF set failed\n" );
        destroy_client ( client );
        return NULL;
    }
    i = MAX_MSG_SIZE;
    status = setsockopt ( sock, SOL_SOCKET, SO_RCVBUF,
                          (char *) &i, sizeof (i) );
    if (status < 0) {
        errlogPrintf ( "CAS: SO_RCVBUF set failed\n" );
        destroy_client ( client );
        return NULL;
    }
#endif

    addrSize = sizeof ( client->addr );
    status = getpeername ( sock, (struct sockaddr *)&client->addr,
                           &addrSize );
    if ( status < 0 ) {
        epicsPrintf ("CAS: peer address fetch failed\n");
        destroy_tcp_client (client);
        return NULL;
    }

    client->evuser = (struct event_user *) db_init_events ();
    if ( ! client->evuser ) {
        errlogPrintf ("CAS: unable to init the event facility\n");
        destroy_tcp_client (client);
        return NULL;
    }

    status = db_add_extra_labor_event ( client->evuser,
                                        rsrv_extra_labor, client );
    if (status != DB_EVENT_OK) {
        errlogPrintf("CAS: unable to setup the event facility\n");
        destroy_tcp_client (client);
        return NULL;
    }

    {
        epicsThreadBooleanStatus tbs;

        tbs = epicsThreadHighestPriorityLevelBelow (
            epicsThreadPriorityCAServerLow, &priorityOfEvents );
        if ( tbs != epicsThreadBooleanStatusSuccess ) {
            priorityOfEvents = epicsThreadPriorityCAServerLow;
        }
    }

    status = db_start_events ( client->evuser, "CAS-event",
                               NULL, NULL, priorityOfEvents );
    if ( status != DB_EVENT_OK ) {
        errlogPrintf ( "CAS: unable to start the event facility\n" );
        destroy_tcp_client ( client );
        return NULL;
    }

    /*
     * add first version message should it be needed
     */
    rsrv_version_reply ( client );

    if ( CASDEBUG > 0 ) {
        char buf[64];

        ipAddrToDottedIP ( &client->addr, buf, sizeof(buf) );
        errlogPrintf ( "CAS: conn req from %s\n", buf );
    }

    return client;
}
/*
 * create_client ()
 */
struct client * create_client ( SOCKET sock, int proto )
{
    struct client *client;
    int spaceAvailOnFreeList;
    size_t spaceNeeded;

    /*
     * stop further use of server if memory becomes scarce
     */
    spaceAvailOnFreeList = freeListItemsAvail ( rsrvClientFreeList ) > 0
        && freeListItemsAvail ( rsrvSmallBufFreeListTCP ) > 0;
    spaceNeeded = sizeof (struct client) + MAX_TCP;
    if ( ! ( osiSufficentSpaceInPool(spaceNeeded) || spaceAvailOnFreeList ) ) {
        epicsSocketDestroy ( sock );
        epicsPrintf ("CAS: no space in pool for a new client (below max block thresh)\n");
        return NULL;
    }

    client = freeListCalloc ( rsrvClientFreeList );
    if ( ! client ) {
        epicsSocketDestroy ( sock );
        epicsPrintf ("CAS: no space in pool for a new client (alloc failed)\n");
        return NULL;
    }

    client->sock = sock;
    client->proto = proto;

    client->blockSem = epicsEventCreate ( epicsEventEmpty );
    client->lock = epicsMutexCreate();
    client->putNotifyLock = epicsMutexCreate();
    client->chanListLock = epicsMutexCreate();
    client->eventqLock = epicsMutexCreate();
    if ( ! client->blockSem || ! client->lock || ! client->putNotifyLock ||
         ! client->chanListLock || ! client->eventqLock ) {
        destroy_client ( client );
        return NULL;
    }

    client->pUserName = NULL;
    client->pHostName = NULL;

    ellInit ( & client->chanList );
    ellInit ( & client->chanPendingUpdateARList );
    ellInit ( & client->putNotifyQue );

    memset ( (char *)&client->addr, 0, sizeof (client->addr) );
    client->tid = 0;

    if ( proto == IPPROTO_TCP ) {
        client->send.buf = (char *) freeListCalloc ( rsrvSmallBufFreeListTCP );
        client->send.maxstk = MAX_TCP;
        client->send.type = mbtSmallTCP;
        client->recv.buf = (char *) freeListCalloc ( rsrvSmallBufFreeListTCP );
        client->recv.maxstk = MAX_TCP;
        client->recv.type = mbtSmallTCP;
    }
    else if ( proto == IPPROTO_UDP ) {
        client->send.buf = malloc ( MAX_UDP_SEND );
        client->send.maxstk = MAX_UDP_SEND;
        client->send.type = mbtUDP;
        client->recv.buf = malloc ( MAX_UDP_RECV );
        client->recv.maxstk = MAX_UDP_RECV;
        client->recv.type = mbtUDP;
    }
    if ( ! client->send.buf || ! client->recv.buf ) {
        destroy_client ( client );
        return NULL;
    }

    client->send.stk = 0u;
    client->send.cnt = 0u;
    client->recv.stk = 0u;
    client->recv.cnt = 0u;
    client->evuser = NULL;
    client->priority = CA_PROTO_PRIORITY_MIN;
    client->disconnect = FALSE;
    epicsTimeGetCurrent ( &client->time_at_last_send );
    epicsTimeGetCurrent ( &client->time_at_last_recv );
    client->minor_version_number = CA_UKN_MINOR_VERSION;
    client->recvBytesToDrain = 0u;

    return client;
}
/*
 * Timeout test: scenario when no response is received from server
 */
static int timeout_test(pj_stun_config *cfg, pj_bool_t destroy_on_err)
{
    struct stun_srv *srv;
    struct stun_client *client;
    pj_str_t srv_addr;
    pj_time_val timeout, t;
    int ret = 0;
    pj_status_t status;

    PJ_LOG(3,(THIS_FILE, " timeout test [%d]", destroy_on_err));

    status = create_client(cfg, &client, destroy_on_err);
    if (status != PJ_SUCCESS)
        return -10;

    status = create_server(client->pool, cfg->ioqueue, 0, &srv);
    if (status != PJ_SUCCESS) {
        destroy_client(client);
        return -20;
    }

    srv_addr = pj_str("127.0.0.1");
    status = pj_stun_sock_start(client->sock, &srv_addr,
                                pj_ntohs(srv->addr.ipv4.sin_port), NULL);
    if (status != PJ_SUCCESS) {
        destroy_server(srv);
        destroy_client(client);
        return -30;
    }

    /* Wait until on_status() callback is called with the failure */
    pj_gettimeofday(&timeout);
    timeout.sec += 60;
    do {
        handle_events(cfg, 100);
        pj_gettimeofday(&t);
    } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout));

    /* Check that callback with correct operation is called */
    if (client->last_op != PJ_STUN_SOCK_BINDING_OP) {
        PJ_LOG(3,(THIS_FILE, " error: expecting Binding operation status"));
        ret = -40;
        goto on_return;
    }

    /* .. and with the correct status */
    if (client->last_status != PJNATH_ESTUNTIMEDOUT) {
        PJ_LOG(3,(THIS_FILE, " error: expecting PJNATH_ESTUNTIMEDOUT"));
        ret = -50;
        goto on_return;
    }

    /* Check that server received correct retransmissions */
    if (srv->rx_cnt != PJ_STUN_MAX_TRANSMIT_COUNT) {
        PJ_LOG(3,(THIS_FILE, " error: expecting %d retransmissions, got %d",
                  PJ_STUN_MAX_TRANSMIT_COUNT, srv->rx_cnt));
        ret = -60;
        goto on_return;
    }

    /* Check that client doesn't receive anything */
    if (client->on_rx_data_cnt != 0) {
        PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
        ret = -70;
        goto on_return;
    }

on_return:
    destroy_server(srv);
    destroy_client(client);
    return ret;
}
enum auth_stat gssrpc__svcauth_gssapi( register struct svc_req *rqst, register struct rpc_msg *msg, bool_t *no_dispatch) { XDR xdrs; auth_gssapi_creds creds; auth_gssapi_init_arg call_arg; auth_gssapi_init_res call_res; gss_buffer_desc output_token, in_buf, out_buf; gss_cred_id_t server_creds; struct gss_channel_bindings_struct bindings, *bindp; OM_uint32 gssstat, minor_stat, time_rec; struct opaque_auth *cred, *verf; svc_auth_gssapi_data *client_data; int i; enum auth_stat ret; OM_uint32 ret_flags; uint32_t seq_num; PRINTF(("svcauth_gssapi: starting\n")); /* clean up expired entries */ clean_client(); /* use AUTH_NONE until there is a client_handle */ rqst->rq_xprt->xp_auth = &svc_auth_none; memset((char *) &call_res, 0, sizeof(call_res)); creds.client_handle.length = 0; creds.client_handle.value = NULL; cred = &msg->rm_call.cb_cred; verf = &msg->rm_call.cb_verf; if (cred->oa_length == 0) { PRINTF(("svcauth_gssapi: empty creds, failing\n")); LOG_MISCERR("empty client credentials"); ret = AUTH_BADCRED; goto error; } PRINTF(("svcauth_gssapi: decoding credentials\n")); xdrmem_create(&xdrs, cred->oa_base, cred->oa_length, XDR_DECODE); memset((char *) &creds, 0, sizeof(creds)); if (! xdr_authgssapi_creds(&xdrs, &creds)) { PRINTF(("svcauth_gssapi: failed decoding creds\n")); LOG_MISCERR("protocol error in client credentials"); xdr_free(xdr_authgssapi_creds, &creds); XDR_DESTROY(&xdrs); ret = AUTH_BADCRED; goto error; } XDR_DESTROY(&xdrs); PRINTF(("svcauth_gssapi: got credentials, version %d, client_handle len %d\n", creds.version, (int) creds.client_handle.length)); if (creds.version != 2) { PRINTF(("svcauth_gssapi: bad credential version\n")); LOG_MISCERR("unsupported client credentials version"); ret = AUTH_BADCRED; goto error; } #ifdef DEBUG_GSSAPI if (svc_debug_gssapi) { if (creds.auth_msg && rqst->rq_proc == AUTH_GSSAPI_EXIT) { PRINTF(("svcauth_gssapi: GSSAPI_EXIT, cleaning up\n")); svc_sendreply(rqst->rq_xprt, xdr_void, NULL); xdr_free(xdr_authgssapi_creds, &creds); cleanup(); exit(0); } } #endif /* * If this is an auth_msg and proc is GSSAPI_INIT, then create a * client handle for this client. Otherwise, look up the * existing handle. 
*/ if (creds.auth_msg && rqst->rq_proc == AUTH_GSSAPI_INIT) { if (creds.client_handle.length != 0) { PRINTF(("svcauth_gssapi: non-empty handle on GSSAPI_INIT\n")); LOG_MISCERR("protocol error in client handle"); ret = AUTH_FAILED; goto error; } PRINTF(("svcauth_gssapi: GSSAPI_INIT, creating client.\n")); client_data = create_client(); if (client_data == NULL) { PRINTF(("svcauth_gssapi: create_client failed\n")); LOG_MISCERR("internal error creating client record"); ret = AUTH_FAILED; goto error; } } else { if (creds.client_handle.length == 0) { PRINTF(("svcauth_gssapi: expected non-empty creds\n")); LOG_MISCERR("protocol error in client credentials"); ret = AUTH_FAILED; goto error; } PRINTF(("svcauth_gssapi: incoming client_handle %d, len %d\n", *((uint32_t *) creds.client_handle.value), (int) creds.client_handle.length)); client_data = get_client(&creds.client_handle); if (client_data == NULL) { PRINTF(("svcauth_gssapi: client_handle lookup failed\n")); LOG_MISCERR("invalid client handle received"); ret = AUTH_BADCRED; goto error; } PRINTF(("svcauth_gssapi: client_handle lookup succeeded\n")); } /* any response we send will use client_handle, so set it now */ call_res.client_handle.length = sizeof(client_data->key); call_res.client_handle.value = (char *) &client_data->key; /* mark this call as using AUTH_GSSAPI via client_data's SVCAUTH */ rqst->rq_xprt->xp_auth = &client_data->svcauth; if (client_data->established == FALSE) { PRINTF(("svcauth_gssapi: context is not established\n")); if (creds.auth_msg == FALSE) { PRINTF(("svcauth_gssapi: expected auth_msg TRUE\n")); LOG_MISCERR("protocol error on incomplete connection"); ret = AUTH_REJECTEDCRED; goto error; } /* * If the context is not established, then only GSSAPI_INIT * and _CONTINUE requests are valid. */ if (rqst->rq_proc != AUTH_GSSAPI_INIT && rqst->rq_proc != AUTH_GSSAPI_CONTINUE_INIT) { PRINTF(("svcauth_gssapi: unacceptable procedure %d\n", rqst->rq_proc)); LOG_MISCERR("protocol error on incomplete connection"); ret = AUTH_FAILED; goto error; } /* call is for us, deserialize arguments */ memset(&call_arg, 0, sizeof(call_arg)); if (! svc_getargs(rqst->rq_xprt, xdr_authgssapi_init_arg, &call_arg)) { PRINTF(("svcauth_gssapi: cannot decode args\n")); LOG_MISCERR("protocol error in procedure arguments"); ret = AUTH_BADCRED; goto error; } /* * Process the call arg version number. * * Set the krb5_gss backwards-compatibility mode based on client * version. This controls whether the AP_REP message is * encrypted with the session key (version 2+, correct) or the * session subkey (version 1, incorrect). This function can * never fail, so we don't bother checking its return value. 
*/ switch (call_arg.version) { case 1: case 2: LOG_MISCERR("Warning: Accepted old RPC protocol request"); call_res.version = 1; break; case 3: case 4: /* 3 and 4 are essentially the same, don't bother warning */ call_res.version = call_arg.version; break; default: PRINTF(("svcauth_gssapi: bad GSSAPI_INIT version\n")); LOG_MISCERR("unsupported GSSAPI_INIT version"); ret = AUTH_BADCRED; goto error; } #ifdef GSS_BACKWARD_HACK krb5_gss_set_backward_mode(&minor_stat, call_arg.version == 1); #endif if (call_arg.version >= 3) { memset(&bindings, 0, sizeof(bindings)); bindings.application_data.length = 0; bindings.initiator_addrtype = GSS_C_AF_INET; bindings.initiator_address.length = 4; bindings.initiator_address.value = &svc_getcaller(rqst->rq_xprt)->sin_addr.s_addr; if (rqst->rq_xprt->xp_laddrlen > 0) { bindings.acceptor_addrtype = GSS_C_AF_INET; bindings.acceptor_address.length = 4; bindings.acceptor_address.value = &rqst->rq_xprt->xp_laddr.sin_addr.s_addr; } else { LOG_MISCERR("cannot get local address"); ret = AUTH_FAILED; goto error; } bindp = &bindings; } else { bindp = GSS_C_NO_CHANNEL_BINDINGS; } /* * If the client's server_creds is already set, use it. * Otherwise, try each credential in server_creds_list until * one of them succeedes, then set the client server_creds * to that. If all fail, the client's server_creds isn't * set (which is fine, because the client will be gc'ed * anyway). * * If accept_sec_context returns something other than * success and GSS_S_FAILURE, then assume different * credentials won't help and stop looping. * * Note that there are really two cases here: (1) the client * has a server_creds already, and (2) it does not. They * are both written in the same loop so that there is only * one textual call to gss_accept_sec_context; in fact, in * case (1), the loop is executed exactly once. */ for (i = 0; i < server_creds_count; i++) { if (client_data->server_creds != NULL) { PRINTF(("svcauth_gssapi: using's clients server_creds\n")); server_creds = client_data->server_creds; } else { PRINTF(("svcauth_gssapi: trying creds %d\n", i)); server_creds = server_creds_list[i]; } /* Free previous output_token from loop */ if(i != 0) gss_release_buffer(&minor_stat, &output_token); call_res.gss_major = gss_accept_sec_context(&call_res.gss_minor, &client_data->context, server_creds, &call_arg.token, bindp, &client_data->client_name, NULL, &output_token, &ret_flags, &time_rec, NULL); if (server_creds == client_data->server_creds) break; PRINTF(("accept_sec_context returned 0x%x 0x%x wrong-princ=%#x\n", call_res.gss_major, call_res.gss_minor, (int) KRB5KRB_AP_WRONG_PRINC)); if (call_res.gss_major == GSS_S_COMPLETE || call_res.gss_major == GSS_S_CONTINUE_NEEDED) { /* server_creds was right, set it! 
*/ PRINTF(("svcauth_gssapi: creds are correct, storing\n")); client_data->server_creds = server_creds; client_data->server_name = server_name_list[i]; break; } else if (call_res.gss_major != GSS_S_FAILURE #ifdef GSSAPI_KRB5 /* * hard-coded because there is no other way * to prevent all GSS_S_FAILURES from * returning a "wrong principal in request" * error */ || ((krb5_error_code) call_res.gss_minor != (krb5_error_code) KRB5KRB_AP_WRONG_PRINC) #endif ) { break; } } gssstat = call_res.gss_major; minor_stat = call_res.gss_minor; /* done with call args */ xdr_free(xdr_authgssapi_init_arg, &call_arg); PRINTF(("svcauth_gssapi: accept_sec_context returned %#x %#x\n", call_res.gss_major, call_res.gss_minor)); if (call_res.gss_major != GSS_S_COMPLETE && call_res.gss_major != GSS_S_CONTINUE_NEEDED) { AUTH_GSSAPI_DISPLAY_STATUS(("accepting context", call_res.gss_major, call_res.gss_minor)); if (log_badauth != NULL) (*log_badauth)(call_res.gss_major, call_res.gss_minor, &rqst->rq_xprt->xp_raddr, log_badauth_data); gss_release_buffer(&minor_stat, &output_token); svc_sendreply(rqst->rq_xprt, xdr_authgssapi_init_res, (caddr_t) &call_res); *no_dispatch = TRUE; ret = AUTH_OK; goto error; } if (output_token.length != 0) { PRINTF(("svcauth_gssapi: got new output token\n")); GSS_COPY_BUFFER(call_res.token, output_token); } if (gssstat == GSS_S_COMPLETE) { client_data->seq_num = rand(); client_expire(client_data, (time_rec == GSS_C_INDEFINITE ? INDEF_EXPIRE : time_rec) + time(0)); PRINTF(("svcauth_gssapi: context established, isn %d\n", client_data->seq_num)); if (auth_gssapi_seal_seq(client_data->context, client_data->seq_num, &call_res.signed_isn) == FALSE) { ret = AUTH_FAILED; LOG_MISCERR("internal error sealing sequence number"); gss_release_buffer(&minor_stat, &output_token); goto error; } } PRINTF(("svcauth_gssapi: sending reply\n")); svc_sendreply(rqst->rq_xprt, xdr_authgssapi_init_res, (caddr_t) &call_res); *no_dispatch = TRUE; /* * If appropriate, set established to TRUE *after* sending * response (otherwise, the client will receive the final * token encrypted) */ if (gssstat == GSS_S_COMPLETE) { gss_release_buffer(&minor_stat, &call_res.signed_isn); client_data->established = TRUE; } gss_release_buffer(&minor_stat, &output_token); } else { PRINTF(("svcauth_gssapi: context is established\n")); /* check the verifier */ PRINTF(("svcauth_gssapi: checking verifier, len %d\n", verf->oa_length)); in_buf.length = verf->oa_length; in_buf.value = verf->oa_base; if (auth_gssapi_unseal_seq(client_data->context, &in_buf, &seq_num) == FALSE) { ret = AUTH_BADVERF; LOG_MISCERR("internal error unsealing sequence number"); goto error; } if (seq_num != client_data->seq_num + 1) { PRINTF(("svcauth_gssapi: expected isn %d, got %d\n", client_data->seq_num + 1, seq_num)); if (log_badverf != NULL) (*log_badverf)(client_data->client_name, client_data->server_name, rqst, msg, log_badverf_data); ret = AUTH_REJECTEDVERF; goto error; } client_data->seq_num++; PRINTF(("svcauth_gssapi: seq_num %d okay\n", seq_num)); /* free previous response verifier, if any */ if (client_data->prev_verf.length != 0) { gss_release_buffer(&minor_stat, &client_data->prev_verf); client_data->prev_verf.length = 0; } /* prepare response verifier */ seq_num = client_data->seq_num + 1; if (auth_gssapi_seal_seq(client_data->context, seq_num, &out_buf) == FALSE) { ret = AUTH_FAILED; LOG_MISCERR("internal error sealing sequence number"); goto error; } client_data->seq_num++; PRINTF(("svcauth_gssapi; response seq_num %d\n", seq_num)); 
rqst->rq_xprt->xp_verf.oa_flavor = AUTH_GSSAPI; rqst->rq_xprt->xp_verf.oa_base = out_buf.value; rqst->rq_xprt->xp_verf.oa_length = out_buf.length; /* save verifier so it can be freed next time */ client_data->prev_verf.value = out_buf.value; client_data->prev_verf.length = out_buf.length; /* * Message is authentic. If auth_msg if true, process the * call; otherwise, return AUTH_OK so it will be dispatched * to the application server. */ if (creds.auth_msg == TRUE) { /* * If process_token fails, then the token probably came * from an attacker. No response (error or otherwise) * should be returned to the client, since it won't be * accepting one. */ switch (rqst->rq_proc) { case AUTH_GSSAPI_MSG: PRINTF(("svcauth_gssapi: GSSAPI_MSG, getting args\n")); memset(&call_arg, 0, sizeof(call_arg)); if (! svc_getargs(rqst->rq_xprt, xdr_authgssapi_init_arg, &call_arg)) { PRINTF(("svcauth_gssapi: cannot decode args\n")); LOG_MISCERR("protocol error in call arguments"); xdr_free(xdr_authgssapi_init_arg, &call_arg); ret = AUTH_BADCRED; goto error; } PRINTF(("svcauth_gssapi: processing token\n")); gssstat = gss_process_context_token(&minor_stat, client_data->context, &call_arg.token); /* done with call args */ xdr_free(xdr_authgssapi_init_arg, &call_arg); if (gssstat != GSS_S_COMPLETE) { AUTH_GSSAPI_DISPLAY_STATUS(("processing token", gssstat, minor_stat)); ret = AUTH_FAILED; goto error; } svc_sendreply(rqst->rq_xprt, xdr_void, NULL); *no_dispatch = TRUE; break; case AUTH_GSSAPI_DESTROY: PRINTF(("svcauth_gssapi: GSSAPI_DESTROY\n")); PRINTF(("svcauth_gssapi: sending reply\n")); svc_sendreply(rqst->rq_xprt, xdr_void, NULL); *no_dispatch = TRUE; destroy_client(client_data); rqst->rq_xprt->xp_auth = NULL; break; default: PRINTF(("svcauth_gssapi: unacceptable procedure %d\n", rqst->rq_proc)); LOG_MISCERR("invalid call procedure number"); ret = AUTH_FAILED; goto error; } } else { /* set credentials for app server; comment in svc.c */ /* seems to imply this is incorrect, but I don't see */ /* any problem with it... */ rqst->rq_clntcred = (char *)client_data->client_name; rqst->rq_svccred = (char *)client_data->context; } } if (creds.client_handle.length != 0) { PRINTF(("svcauth_gssapi: freeing client_handle len %d\n", (int) creds.client_handle.length)); xdr_free(xdr_authgssapi_creds, &creds); } PRINTF(("\n")); return AUTH_OK; error: if (creds.client_handle.length != 0) { PRINTF(("svcauth_gssapi: freeing client_handle len %d\n", (int) creds.client_handle.length)); xdr_free(xdr_authgssapi_creds, &creds); } PRINTF(("\n")); return ret; }
static void isc_httpd_recvdone(isc_task_t *task, isc_event_t *ev) { isc_region_t r; isc_result_t result; isc_httpd_t *httpd = ev->ev_arg; isc_socketevent_t *sev = (isc_socketevent_t *)ev; isc_httpdurl_t *url; isc_time_t now; char datebuf[32]; /* Only need 30, but safety first */ ENTER("recv"); INSIST(ISC_HTTPD_ISRECV(httpd)); if (sev->result != ISC_R_SUCCESS) { NOTICE("recv destroying client"); destroy_client(&httpd); goto out; } result = process_request(httpd, sev->n); if (result == ISC_R_NOTFOUND) { if (httpd->recvlen >= HTTP_RECVLEN - 1) { destroy_client(&httpd); goto out; } r.base = (unsigned char *)httpd->recvbuf + httpd->recvlen; r.length = HTTP_RECVLEN - httpd->recvlen - 1; /* check return code? */ (void)isc_socket_recv(httpd->sock, &r, 1, task, isc_httpd_recvdone, httpd); goto out; } else if (result != ISC_R_SUCCESS) { destroy_client(&httpd); goto out; } ISC_HTTPD_SETSEND(httpd); /* * XXXMLG Call function here. Provide an add-header function * which will append the common headers to a response we generate. */ isc_buffer_initnull(&httpd->bodybuffer); isc_time_now(&now); isc_time_formathttptimestamp(&now, datebuf, sizeof(datebuf)); url = ISC_LIST_HEAD(httpd->mgr->urls); while (url != NULL) { if (strcmp(httpd->url, url->url) == 0) break; url = ISC_LIST_NEXT(url, link); } if (url == NULL) result = httpd->mgr->render_404(httpd->url, NULL, httpd->querystring, NULL, NULL, &httpd->retcode, &httpd->retmsg, &httpd->mimetype, &httpd->bodybuffer, &httpd->freecb, &httpd->freecb_arg); else result = url->action(httpd->url, url, httpd->querystring, httpd->headers, url->action_arg, &httpd->retcode, &httpd->retmsg, &httpd->mimetype, &httpd->bodybuffer, &httpd->freecb, &httpd->freecb_arg); if (result != ISC_R_SUCCESS) { result = httpd->mgr->render_500(httpd->url, url, httpd->querystring, NULL, NULL, &httpd->retcode, &httpd->retmsg, &httpd->mimetype, &httpd->bodybuffer, &httpd->freecb, &httpd->freecb_arg); RUNTIME_CHECK(result == ISC_R_SUCCESS); } isc_httpd_response(httpd); isc_httpd_addheader(httpd, "Content-Type", httpd->mimetype); isc_httpd_addheader(httpd, "Date", datebuf); isc_httpd_addheader(httpd, "Expires", datebuf); if (url != NULL && url->isstatic) { char loadbuf[32]; isc_time_formathttptimestamp(&url->loadtime, loadbuf, sizeof(loadbuf)); isc_httpd_addheader(httpd, "Last-Modified", loadbuf); isc_httpd_addheader(httpd, "Cache-Control: public", NULL); } else { isc_httpd_addheader(httpd, "Last-Modified", datebuf); isc_httpd_addheader(httpd, "Pragma: no-cache", NULL); isc_httpd_addheader(httpd, "Cache-Control: no-cache", NULL); } isc_httpd_addheader(httpd, "Server: libisc", NULL); isc_httpd_addheaderuint(httpd, "Content-Length", isc_buffer_usedlength(&httpd->bodybuffer)); isc_httpd_endheaders(httpd); /* done */ ISC_LIST_APPEND(httpd->bufflist, &httpd->headerbuffer, link); /* * Link the data buffer into our send queue, should we have any data * rendered into it. If no data is present, we won't do anything * with the buffer. */ if (isc_buffer_length(&httpd->bodybuffer) > 0) ISC_LIST_APPEND(httpd->bufflist, &httpd->bodybuffer, link); /* check return code? */ (void)isc_socket_sendv(httpd->sock, &httpd->bufflist, task, isc_httpd_senddone, httpd); out: isc_event_free(&ev); EXIT("recv"); }
static void isc_httpd_senddone(isc_task_t *task, isc_event_t *ev) {
    isc_httpd_t *httpd = ev->ev_arg;
    isc_region_t r;
    isc_socketevent_t *sev = (isc_socketevent_t *)ev;

    ENTER("senddone");
    INSIST(ISC_HTTPD_ISSEND(httpd));

    /*
     * First, unlink our header buffer from the socket's bufflist. This
     * is sort of an evil hack, since we know our buffer will be there,
     * and we know its address, so we can just remove it directly.
     */
    NOTICE("senddone unlinked header");
    ISC_LIST_UNLINK(sev->bufferlist, &httpd->headerbuffer, link);

    /*
     * We will always want to clean up our receive buffer, even if we
     * got an error on send or we are shutting down.
     *
     * We will pass in the buffer only if there is data in it. If
     * there is no data, we will pass in a NULL.
     */
    if (httpd->freecb != NULL) {
        isc_buffer_t *b = NULL;
        if (isc_buffer_length(&httpd->bodybuffer) > 0)
            b = &httpd->bodybuffer;
        httpd->freecb(b, httpd->freecb_arg);
        NOTICE("senddone free callback performed");
    }
    if (ISC_LINK_LINKED(&httpd->bodybuffer, link)) {
        ISC_LIST_UNLINK(sev->bufferlist, &httpd->bodybuffer, link);
        NOTICE("senddone body buffer unlinked");
    }

    if (sev->result != ISC_R_SUCCESS) {
        destroy_client(&httpd);
        goto out;
    }

    if ((httpd->flags & HTTPD_CLOSE) != 0) {
        destroy_client(&httpd);
        goto out;
    }

    ISC_HTTPD_SETRECV(httpd);

    NOTICE("senddone restarting recv on socket");

    reset_client(httpd);

    r.base = (unsigned char *)httpd->recvbuf;
    r.length = HTTP_RECVLEN - 1;
    /* check return code? */
    (void)isc_socket_recv(httpd->sock, &r, 1, task,
                          isc_httpd_recvdone, httpd);

 out:
    isc_event_free(&ev);
    EXIT("senddone");
}
static void client_callback( int socket_id, void *cookie, int read_ready, int write_ready, int error_seen) { struct client *client = cookie; AIM_ASSERT(socket_id == client->fd); if (error_seen) { int socket_error = 0; socklen_t len = sizeof(socket_error); getsockopt(socket_id, SOL_SOCKET, SO_ERROR, &socket_error, &len); AIM_LOG_INFO("Error seen on CLI socket: %s", strerror(socket_error)); destroy_client(client); return; } if (read_ready) { int c; if ((c = read(client->fd, client->read_buffer+client->read_buffer_offset, READ_BUFFER_SIZE - client->read_buffer_offset)) < 0) { AIM_LOG_ERROR("read failed: %s", strerror(errno)); return; } client->read_buffer_offset += c; if (c == 0) { /* Peer has shutdown their write side */ if (client->write_buffer_len == 0 && aim_pvs_buffer_size(client->write_pvs) == 0) { destroy_client(client); } else { /* We'll destroy the client once we've finished writing to it */ ind_soc_data_in_pause(client->fd); client->read_finished = true; } return; } /* Process each complete line */ char *newline; char *start = client->read_buffer; int remaining = client->read_buffer_offset; while ((newline = memchr(start, '\n', remaining))) { *newline = '\0'; ucli_dispatch_string(client->ucli, client->write_pvs, start); remaining -= newline - start + 1; start = newline + 1; } /* Move incomplete line (which may be empty) to the beginning of the read buffer */ if (client->read_buffer != start) { memmove(client->read_buffer, start, remaining); client->read_buffer_offset = remaining; } else if (client->read_buffer_offset == READ_BUFFER_SIZE) { AIM_LOG_WARN("Disconnecting CLI client due to too-long line"); destroy_client(client); return; } if (aim_pvs_buffer_size(client->write_pvs) > 0) { ind_soc_data_out_ready(socket_id); } } if (write_ready) { /* Copy PVS data into our write buffer and reset PVS */ if (client->write_buffer == NULL) { client->write_buffer = aim_pvs_buffer_get(client->write_pvs); client->write_buffer_len = aim_pvs_buffer_size(client->write_pvs); client->write_buffer_offset = 0; /* aim_pvs_buffer_reset has a bug, workaround it */ aim_pvs_destroy(client->write_pvs); client->write_pvs = aim_pvs_buffer_create(); } int c = send(client->fd, client->write_buffer+client->write_buffer_offset, client->write_buffer_len-client->write_buffer_offset, MSG_NOSIGNAL); if (c <= 0) { AIM_LOG_ERROR("write failed: %s", strerror(errno)); destroy_client(client); return; } client->write_buffer_offset += c; /* Free our write buffer if we're finished with it */ if (client->write_buffer_len == client->write_buffer_offset) { aim_free(client->write_buffer); client->write_buffer_len = client->write_buffer_offset = 0; client->write_buffer = NULL; if (aim_pvs_buffer_size(client->write_pvs) == 0) { ind_soc_data_out_clear(client->fd); if (client->read_finished) { destroy_client(client); } } } } }