/**
 * Function that can be used to force the plugin to disconnect
 * from the given peer and cancel all previous transmissions
 * (and their continuations).
 *
 * @param cls closure
 * @param target peer from which to disconnect
 */
static void
http_client_plugin_disconnect (void *cls,
                               const struct GNUNET_PeerIdentity *target)
{
  struct HTTP_Client_Plugin *plugin = cls;
  struct Session *next = NULL;
  struct Session *pos = NULL;

  GNUNET_log_from (GNUNET_ERROR_TYPE_DEBUG, plugin->name,
                   "Transport tells me to disconnect `%s'\n",
                   GNUNET_i2s (target));
  next = plugin->head;
  while (NULL != (pos = next))
  {
    next = pos->next;
    if (0 == memcmp (target, &pos->target,
                     sizeof (struct GNUNET_PeerIdentity)))
    {
      GNUNET_log_from (GNUNET_ERROR_TYPE_DEBUG, plugin->name,
                       "Disconnecting session %p to `%s'\n",
                       pos, GNUNET_i2s (target));
      GNUNET_assert (GNUNET_OK == client_disconnect (pos));
    }
  }
}
void client_sendall(char* buff, const size_t buffsize)
{
    int clientindex = 0;
    int setindex = client2set( 0 );

    while (clientindex < g_clients.m_count)
    {
        struct pollfd* set = g_set + setindex;
        struct TClient* client = g_clients.m_client + clientindex;

        int sendsize = send( client->m_sock, buff, buffsize, MSG_DONTWAIT );
        if ( sendsize == -1 )
        {
            if ( errno == EWOULDBLOCK )
            {
                sendsize = 0;
            }
            else
            {
                error_send( errno );
                client_disconnect( client );
                continue;
            }
        }

        if ( sendsize < buffsize )
        {
            /* partial send: remember the remainder and wait for POLLOUT */
            ++g_clients.m_blocked;
            client->m_sendbuff = buff + sendsize;
            client->m_remain = buffsize - sendsize;
            set->events |= POLLOUT;
        }

        ++clientindex;
        ++setindex;
    }
}
void ssl_socket::close_impl( void )
{
    client_disconnect(&m_hCreds, &m_hContext);
    _socket.close();
}
static int fetch_stream_continue(struct imap_fetch_context *ctx)
{
	struct imap_fetch_state *state = &ctx->state;
	const char *disconnect_reason;
	uoff_t orig_input_offset = state->cur_input->v_offset;
	enum ostream_send_istream_result res;

	o_stream_set_max_buffer_size(ctx->client->output, 0);
	res = o_stream_send_istream(ctx->client->output, state->cur_input);
	o_stream_set_max_buffer_size(ctx->client->output, (size_t)-1);

	if (ctx->state.cur_stats_sizep != NULL) {
		*ctx->state.cur_stats_sizep +=
			state->cur_input->v_offset - orig_input_offset;
	}

	switch (res) {
	case OSTREAM_SEND_ISTREAM_RESULT_FINISHED:
		if (state->cur_input->v_offset != state->cur_size) {
			/* Input stream gave less data than expected */
			mail_set_cache_corrupted(state->cur_mail,
				state->cur_size_field, t_strdup_printf(
				"read(%s): FETCH %s got too little data: "
				"%"PRIuUOFF_T" vs %"PRIuUOFF_T,
				i_stream_get_name(state->cur_input),
				state->cur_human_name,
				state->cur_input->v_offset, state->cur_size));
			client_disconnect(ctx->client, "FETCH failed");
			return -1;
		}
		return 1;
	case OSTREAM_SEND_ISTREAM_RESULT_WAIT_INPUT:
		i_unreached();
	case OSTREAM_SEND_ISTREAM_RESULT_WAIT_OUTPUT:
		return 0;
	case OSTREAM_SEND_ISTREAM_RESULT_ERROR_INPUT:
		fetch_read_error(ctx, &disconnect_reason);
		client_disconnect(ctx->client, disconnect_reason);
		return -1;
	case OSTREAM_SEND_ISTREAM_RESULT_ERROR_OUTPUT:
		/* client disconnected */
		return -1;
	}
	i_unreached();
}
static int fetch_stream_continue(struct imap_fetch_context *ctx)
{
	struct imap_fetch_state *state = &ctx->state;
	const char *disconnect_reason;
	uoff_t orig_input_offset = state->cur_input->v_offset;
	int ret;

	o_stream_set_max_buffer_size(ctx->client->output, 0);
	ret = o_stream_send_istream(ctx->client->output, state->cur_input);
	o_stream_set_max_buffer_size(ctx->client->output, (size_t)-1);

	if (ctx->state.cur_stats_sizep != NULL) {
		*ctx->state.cur_stats_sizep +=
			state->cur_input->v_offset - orig_input_offset;
	}

	if (state->cur_input->v_offset != state->cur_size) {
		/* unfinished */
		if (state->cur_input->stream_errno != 0) {
			fetch_read_error(ctx, &disconnect_reason);
			client_disconnect(ctx->client, disconnect_reason);
			return -1;
		}
		if (!i_stream_have_bytes_left(state->cur_input)) {
			/* Input stream gave less data than expected */
			mail_set_cache_corrupted_reason(state->cur_mail,
				state->cur_size_field, t_strdup_printf(
				"read(%s): FETCH %s got too little data: "
				"%"PRIuUOFF_T" vs %"PRIuUOFF_T,
				i_stream_get_name(state->cur_input),
				state->cur_human_name,
				state->cur_input->v_offset, state->cur_size));
			client_disconnect(ctx->client, "FETCH failed");
			return -1;
		}
		if (ret < 0) {
			/* client probably disconnected */
			return -1;
		}
		o_stream_set_flush_pending(ctx->client->output, TRUE);
		return 0;
	}
	return 1;
}
static int fetch_stream_continue(struct imap_fetch_context *ctx)
{
	struct imap_fetch_state *state = &ctx->state;
	off_t ret;

	o_stream_set_max_buffer_size(ctx->client->output, 0);
	ret = o_stream_send_istream(ctx->client->output, state->cur_input);
	o_stream_set_max_buffer_size(ctx->client->output, (size_t)-1);

	if (ret > 0)
		state->cur_offset += ret;

	if (state->cur_offset != state->cur_size) {
		/* unfinished */
		if (state->cur_input->stream_errno != 0) {
			fetch_read_error(ctx);
			client_disconnect(ctx->client, "FETCH failed");
			return -1;
		}
		if (!i_stream_have_bytes_left(state->cur_input)) {
			/* Input stream gave less data than expected */
			i_error("read(%s): FETCH %s for mailbox %s UID %u "
				"got too little data: "
				"%"PRIuUOFF_T" vs %"PRIuUOFF_T,
				i_stream_get_name(state->cur_input),
				state->cur_human_name,
				mailbox_get_vname(state->cur_mail->box),
				state->cur_mail->uid,
				state->cur_offset, state->cur_size);
			mail_set_cache_corrupted(state->cur_mail,
						 state->cur_size_field);
			client_disconnect(ctx->client, "FETCH failed");
			return -1;
		}
		if (ret < 0) {
			/* client probably disconnected */
			return -1;
		}
		o_stream_set_flush_pending(ctx->client->output, TRUE);
		return 0;
	}
	return 1;
}
static void __cleanup(void)
{
    if (SDL_WasInit(SDL_INIT_VIDEO | SDL_INIT_TIMER))
    {
        SDL_Quit();
    }

    if (NULL != timer_id)
    {
        SDL_RemoveTimer(timer_id);
        timer_id = NULL;
    }

    if (NULL != tanks_timer_id)
    {
        SDL_RemoveTimer(tanks_timer_id);
        tanks_timer_id = NULL;
    }

    if (0 != display_lists)
    {
        glDeleteLists(display_lists, DISPLAY_LISTS_COUNT);
        display_lists = 0;
    }

    if (l)
    {
        landscape_destroy(l);
        l = NULL;
    }

    if (viewer_protocol.connected)
    {
        check(client_disconnect(&viewer_protocol, false), "Failed to disconnect.", "");
    }

    if (shoots)
    {
        dynamic_array_destroy(shoots);
        shoots = NULL;
    }

    if (explosions)
    {
        dynamic_array_destroy(explosions);
        explosions = NULL;
    }

    static bool net_stopped = false;
error:
    if (!net_stopped)
    {
        client_net_stop();
        net_stopped = true;
    }
}
static void returncode_bufferev_write_cb(struct bufferevent *bufferev,
                                         void *client_)
{
    Client * const client = client_;

    (void) bufferev;
    logfile(LOG_DEBUG, _("Return code successfully written for fd #%d"),
            client->client_fd);
    client_disconnect(client);
}
static void returncode_bufferev_error_cb(struct bufferevent * const bufferev,
                                         short what, void *client_)
{
    Client * const client = client_;

    (void) bufferev;
    logfile(LOG_DEBUG, _("Error %d when sending return code to fd #%d"),
            (int) what, client->client_fd);
    client_disconnect(client);
}
static bool cmd_getscript_continue(struct client_command_context *cmd)
{
	struct client *client = cmd->client;
	struct cmd_getscript_context *ctx = cmd->context;

	switch (o_stream_send_istream(client->output, ctx->script_stream)) {
	case OSTREAM_SEND_ISTREAM_RESULT_FINISHED:
		if (ctx->script_stream->v_offset != ctx->script_size &&
		    !ctx->failed) {
			/* Input stream gave less data than expected */
			sieve_storage_set_critical(ctx->storage,
				"GETSCRIPT for script `%s' from %s got too little data: "
				"%"PRIuUOFF_T" vs %"PRIuUOFF_T,
				sieve_script_name(ctx->script),
				sieve_script_location(ctx->script),
				ctx->script_stream->v_offset, ctx->script_size);
			client_disconnect(ctx->client, "GETSCRIPT failed");
			ctx->failed = TRUE;
		}
		break;
	case OSTREAM_SEND_ISTREAM_RESULT_WAIT_INPUT:
		i_unreached();
	case OSTREAM_SEND_ISTREAM_RESULT_WAIT_OUTPUT:
		return FALSE;
	case OSTREAM_SEND_ISTREAM_RESULT_ERROR_INPUT:
		sieve_storage_set_critical(ctx->storage,
			"o_stream_send_istream() failed for script `%s' from %s: %s",
			sieve_script_name(ctx->script),
			sieve_script_location(ctx->script),
			i_stream_get_error(ctx->script_stream));
		ctx->failed = TRUE;
		break;
	case OSTREAM_SEND_ISTREAM_RESULT_ERROR_OUTPUT:
		client_disconnect(ctx->client,
			io_stream_get_disconnect_reason(client->input,
							client->output));
		ctx->failed = TRUE;
		break;
	}
	return cmd_getscript_finish(ctx);
}
/**
 * Session was idle, so disconnect it
 */
static void
client_session_timeout (void *cls,
                        const struct GNUNET_SCHEDULER_TaskContext *tc)
{
  struct Session *s = cls;

  GNUNET_assert (NULL != cls);
  s->timeout_task = GNUNET_SCHEDULER_NO_TASK;
  GNUNET_log (TIMEOUT_LOG,
              "Session %p was idle for %llu ms, disconnecting\n",
              s,
              (unsigned long long) CLIENT_SESSION_TIMEOUT.rel_value);
  /* call session destroy function */
  GNUNET_assert (GNUNET_OK == client_disconnect (s));
}
void client_outmessage (enum outmsg_type msgtype, char *str)
{
  char buf[1024];

  GTET_O_STRCPY (buf, outmsg_translate (msgtype));
  if (str) {
    GTET_O_STRCAT (buf, " ");
    GTET_O_STRCAT (buf, str);
  }

  switch (msgtype) {
  case OUT_DISCONNECT:
    client_disconnect ();
    break;
  case OUT_CONNECTED:
    client_connected ();
    break;
  default:
    client_sendmsg (buf);
  }
}
/**
 * Exit point from the plugin.
 *
 * @param cls api as closure
 * @return NULL
 */
void *
LIBGNUNET_PLUGIN_TRANSPORT_DONE (void *cls)
{
  struct GNUNET_TRANSPORT_PluginFunctions *api = cls;
  struct HTTP_Client_Plugin *plugin = api->cls;
  struct Session *pos;
  struct Session *next;

  if (NULL == api->cls)
  {
    /* Stub shutdown */
    GNUNET_free (api);
    return NULL;
  }

  GNUNET_log_from (GNUNET_ERROR_TYPE_DEBUG, plugin->name,
                   _("Shutting down plugin `%s'\n"),
                   plugin->name);
  next = plugin->head;
  while (NULL != (pos = next))
  {
    next = pos->next;
    client_disconnect (pos);
  }

  if (GNUNET_SCHEDULER_NO_TASK != plugin->client_perform_task)
  {
    GNUNET_SCHEDULER_cancel (plugin->client_perform_task);
    plugin->client_perform_task = GNUNET_SCHEDULER_NO_TASK;
  }

  if (NULL != plugin->curl_multi_handle)
  {
    curl_multi_cleanup (plugin->curl_multi_handle);
    plugin->curl_multi_handle = NULL;
  }
  curl_global_cleanup ();

  GNUNET_log_from (GNUNET_ERROR_TYPE_DEBUG, plugin->name,
                   _("Shutdown for plugin `%s' complete\n"),
                   plugin->name);
  GNUNET_free (plugin);
  GNUNET_free (api);
  return NULL;
}
int clients_loop(char* recvbuff, int nready)
{
    int setindex = client2set( 0 );
    int clientindex = 0;

    while ( nready > 0 && clientindex < g_clients.m_count )
    {
        struct pollfd* set = g_set + setindex;
        struct TClient* client = g_clients.m_client + clientindex;

        /* check for events */
        if ( !set->revents )
        {
            ++clientindex;
            ++setindex;
            continue;
        }

        /* disconnect client */
        if ( set->revents & POLLHUP )
        {
            --nready;
            client_disconnect( client );
            continue;
        }

        /* can send to client */
        if ( set->revents & POLLOUT )
        {
            nready = client_POLLOUT( nready, set, client, recvbuff );
            continue;
        }

        /* read from client */
        if ( set->revents & POLLIN )
        {
            nready = client_POLLIN( nready, set, client, recvbuff );
            continue;
        }

        assert( !set->revents );    /* only POLLHUP, POLLOUT and POLLIN are expected here */
    }

    int inone = p_iomode == IOMODE_NONE || p_inmode == IOMODE_NONE;
    int ionce = p_iomode == IOMODE_ONCE || p_inmode == IOMODE_ONCE;
    if ( (g_clients.m_blocked == 0) && (inone || ionce) )
    {
        g_set[STDIN_FILENO].events |= POLLIN;
        signals_cansyncterm();
    }

    return nready;
}
void client_tdisconnect(struct TClient* client)
{
    if ( 0 < client->m_remain )
    {
        return;
    }

    if ( p_wait.tv_usec == -1 )
    {
        return;
    }

    if ( timer_iszero( &p_wait ) )
    {
        client_disconnect( client );
        return;
    }

    timer_init( client );
}
/**
 * Session was idle, so disconnect it
 */
static void
session_timeout (void *cls,
                 const struct GNUNET_SCHEDULER_TaskContext *tc)
{
  struct Session *s = cls;

  GNUNET_assert (NULL != cls);
  s->timeout_task = GNUNET_SCHEDULER_NO_TASK;
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
              "Session %p was idle for %llu ms, disconnecting\n",
              s,
              (unsigned long long) GNUNET_CONSTANTS_IDLE_CONNECTION_TIMEOUT.rel_value);
  /* call session destroy function */
  if (s->inbound == GNUNET_NO)
    GNUNET_assert (GNUNET_OK == client_disconnect (s));
  else
    GNUNET_assert (GNUNET_OK == server_disconnect (s));
}
// Close connection and recycle structure
static void connection_destroy(struct connection *co)
{
    if (co->link)
    {
        ZOOM_connection_destroy(co->link);
        iochan_destroy(co->iochan);
    }
    yaz_log(YLOG_DEBUG, "%p Connection destroy %s", co, co->host->url);

    if (co->client)
    {
        client_disconnect(co->client);
    }

    xfree(co->zproxy);
    xfree(co);
    connection_use(-1);
}
static void on_client_event(struct bufferevent *bev, short why, void *ctx)
{
    Client *cl = ctx;
    SocksLink *sl = cl->parent;

    if (why & EVBUFFER_EOF) {
        /* Client disconnected: remove the read event and
         * free the client structure. */
        pr_debug(sl, "client disconnected");
        client_drop(cl);
    } else if (why & EVBUFFER_TIMEOUT) {
        pr_debug(sl, "client timeout");
        client_disconnect(cl);
    } else if (cl->client.fd != -1) {
        pr_debug(sl, "client socket error, disconnecting");
        client_drop(cl);
    }
}
static bool cmd_fetch_finish(struct imap_fetch_context *ctx,
			     struct client_command_context *cmd)
{
	static const char *ok_message = "OK Fetch completed.";
	const char *tagged_reply = ok_message;
	enum mail_error error;
	bool failed, seen_flags_changed = ctx->state.seen_flags_changed;

	if (ctx->state.skipped_expunged_msgs) {
		tagged_reply = "OK ["IMAP_RESP_CODE_EXPUNGEISSUED"] "
			"Some messages were already expunged.";
	}

	failed = imap_fetch_end(ctx) < 0;
	imap_fetch_free(&ctx);

	if (failed) {
		const char *errstr;

		if (cmd->client->output->closed) {
			client_disconnect(cmd->client, "Disconnected");
			return TRUE;
		}

		errstr = mailbox_get_last_error(cmd->client->mailbox, &error);
		if (error == MAIL_ERROR_CONVERSION ||
		    error == MAIL_ERROR_INVALIDDATA) {
			/* a) BINARY found unsupported Content-Transfer-Encoding
			   b) Content was invalid */
			tagged_reply = t_strdup_printf(
				"NO ["IMAP_RESP_CODE_UNKNOWN_CTE"] %s", errstr);
		} else {
			/* We never want to reply NO to FETCH requests,
			   BYE is preferable (see imap-ml for reasons). */
			client_disconnect_with_error(cmd->client, errstr);
			return TRUE;
		}
	}
	return cmd_sync(cmd,
			(seen_flags_changed ? 0 : MAILBOX_SYNC_FLAG_FAST) |
			(cmd->uid ? 0 : MAILBOX_SYNC_FLAG_NO_EXPUNGES),
			0, tagged_reply);
}
/**
 * Function that can be used to force the plugin to disconnect
 * from the given peer and cancel all previous transmissions
 * (and their continuations).
 *
 * @param cls closure
 * @param target peer from which to disconnect
 */
static void
http_plugin_disconnect (void *cls, const struct GNUNET_PeerIdentity *target)
{
  struct Plugin *plugin = cls;
  struct Session *next = NULL;
  struct Session *s = plugin->head;

  GNUNET_log_from (GNUNET_ERROR_TYPE_DEBUG, plugin->name,
                   "Transport tells me to disconnect `%s'\n",
                   GNUNET_i2s (target));
  while (s != NULL)
  {
    next = s->next;
    if (0 == memcmp (target, &s->target, sizeof (struct GNUNET_PeerIdentity)))
    {
      if (s->inbound == GNUNET_NO)
        GNUNET_assert (GNUNET_OK == client_disconnect (s));
      else
        GNUNET_assert (GNUNET_OK == server_disconnect (s));
      GNUNET_CONTAINER_DLL_remove (plugin->head, plugin->tail, s);

      struct HTTP_Message *msg = s->msg_head;
      struct HTTP_Message *tmp = NULL;

      while (msg != NULL)
      {
        tmp = msg->next;
        GNUNET_CONTAINER_DLL_remove (s->msg_head, s->msg_tail, msg);
        if (msg->transmit_cont != NULL)
        {
          msg->transmit_cont (msg->transmit_cont_cls, target, GNUNET_SYSERR);
        }
        GNUNET_free (msg);
        msg = tmp;
      }

      delete_session (s);
    }
    s = next;
  }
}
static int fetch_stream_send_direct(struct imap_fetch_context *ctx)
{
	off_t ret;

	o_stream_set_max_buffer_size(ctx->client->output, 0);
	ret = o_stream_send_istream(ctx->client->output, ctx->cur_input);
	o_stream_set_max_buffer_size(ctx->client->output, (size_t)-1);

	if (ret < 0)
		return -1;

	ctx->cur_offset += ret;
	if (ctx->cur_append_eoh && ctx->cur_offset + 2 == ctx->cur_size) {
		/* Netscape missing EOH workaround. */
		if (o_stream_send(ctx->client->output, "\r\n", 2) < 0)
			return -1;
		ctx->cur_offset += 2;
		ctx->cur_append_eoh = FALSE;
	}

	if (ctx->cur_offset != ctx->cur_size) {
		/* unfinished */
		if (!i_stream_have_bytes_left(ctx->cur_input)) {
			/* Input stream gave less data than expected */
			i_error("FETCH %s for mailbox %s UID %u "
				"got too little data (copying): "
				"%"PRIuUOFF_T" vs %"PRIuUOFF_T,
				ctx->cur_name, mailbox_get_vname(ctx->mail->box),
				ctx->mail->uid, ctx->cur_offset, ctx->cur_size);
			mail_set_cache_corrupted(ctx->mail, ctx->cur_size_field);
			client_disconnect(ctx->client, "FETCH failed");
			return -1;
		}
		o_stream_set_flush_pending(ctx->client->output, TRUE);
		return 0;
	}
	return 1;
}
static bool cmd_getscript_finish(struct cmd_getscript_context *ctx)
{
	struct client *client = ctx->client;

	if (ctx->script != NULL)
		sieve_script_unref(&ctx->script);

	if (ctx->failed) {
		if (client->output->closed) {
			client_disconnect(client, "Disconnected");
			return TRUE;
		}
		client_send_storage_error(client, client->storage);
		return TRUE;
	}

	client_send_line(client, "");
	client_send_ok(client, "Getscript completed.");
	return TRUE;
}
int main(int argc, char **argv)
{
	struct timeval start, end, dt;
	struct timeval t1, t2, dt1, dt2, dt3;

	if (argc != 4)
		die("usage: client <server-address> <server-port> <send_buffer_size>");

	int count = atoi(argv[3]);
	send_buffer_size = count * sysconf(_SC_PAGESIZE);
	recv_buffer_size = send_buffer_size;
	//send_buffer_size = atoi(argv[3]);
	//recv_buffer_size = send_buffer_size;

	gettimeofday(&start, NULL);

	/* set up the connection and exchange buffer_key and buffer_addr */
	client_test(argv[1], argv[2]);
	gettimeofday(&t1, NULL);

	//client_write((uint32_t) send_buffer_size);
	//printf("write once again\n");
	client_write_once((uint32_t) send_buffer_size);
	gettimeofday(&t2, NULL);

	/* disconnect and free the locked memory */
	client_disconnect();
	gettimeofday(&end, NULL);

	timersub(&end, &start, &dt);
	timersub(&t1, &start, &dt1);
	timersub(&t2, &t1, &dt2);
	timersub(&end, &t2, &dt3);

	long job_usec = dt.tv_usec + 1000000 * dt.tv_sec;
	printf("dt1 = %ld\n", dt1.tv_usec + 1000000 * dt1.tv_sec);
	printf("dt2 = %ld\n", dt2.tv_usec + 1000000 * dt2.tv_sec);
	printf("dt3 = %ld\n", dt3.tv_usec + 1000000 * dt3.tv_sec);
	printf("time used for the whole transfer: job_usec=%ld\n", job_usec);
	return 0;
}
static void client_connect_server(Client *cl)
{
    SocksLink *sl = cl->parent;

    /*
     * The client may send data before it has received the authentication
     * result, so keep that data around by setting a very low read
     * high-watermark together with a dummy read callback.
     */
    bufferevent_setcb(cl->client.bufev, on_client_read_dummy,
                      on_client_write, on_client_event, cl);
    bufferevent_setwatermark(cl->client.bufev, EV_READ, 0, 1);

    if (!sl->helpers_max) {
        server_connect(cl, &sl->nexthop_addr, sl->nexthop_addrlen);
    } else {
        if (helper_call(cl)) {
            /* No helper available, drop the client
             * (it may try to reconnect later) */
            client_disconnect(cl);
        }
    }
}
int client_POLLOUT(int nready, struct pollfd* set, struct TClient* client, char* recvbuff)
{
    set->revents ^= POLLOUT;
    --nready;

    const int sock = client->m_sock;
    const char* sendbuff = client->m_sendbuff;
    const int remain = client->m_remain;

    int sendsize = send( sock, sendbuff, remain, MSG_DONTWAIT | MSG_NOSIGNAL );
    if ( sendsize == -1 )
    {
        assert( errno != EWOULDBLOCK );    /* POLLOUT was reported, so the socket should be writable */
        /* if error */
        error_send( errno );
        client_disconnect( client );
        return nready;
    }

    if ( sendsize < remain )
    {
        /* not all data was sent */
        client->m_sendbuff += sendsize;
        client->m_remain -= sendsize;
    }
    else
    {
        /* everything was sent */
        client->m_sendbuff = NULL;
        client->m_remain = 0;
        set->events &= ~POLLOUT;
        --g_clients.m_blocked;

        if ( g_set[STDIN_FILENO].fd == -1 )    /* stdin closed */
        {
            client_tdisconnect( client );
        }
    }

    return nready;
}
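Several of the snippets above (client_sendall, clients_loop, client_tdisconnect, client_POLLOUT) call client_disconnect on a TClient without showing its body. As a rough sketch only, here is what such a helper might look like for this poll-based server; the compaction scheme, the client2set() index mapping and the g_clients bookkeeping are assumptions inferred from the callers, not the project's actual implementation.

/* Hypothetical sketch: a minimal client_disconnect for the poll-based
 * TClient server above. Assumes g_clients.m_client and the g_set pollfd
 * entries are parallel arrays, client2set() maps a client index to its
 * pollfd slot, and <unistd.h> is available for close(). */
static void client_disconnect_sketch( struct TClient* client )
{
    int clientindex = client - g_clients.m_client;
    int setindex = client2set( clientindex );
    int last = g_clients.m_count - 1;

    /* a client with pending data counts as blocked; release it */
    if ( 0 < client->m_remain )
    {
        --g_clients.m_blocked;
    }

    close( client->m_sock );

    /* compact both arrays by moving the last entry into the freed slot,
     * which is why the callers do not advance clientindex after a disconnect */
    g_clients.m_client[clientindex] = g_clients.m_client[last];
    g_set[setindex] = g_set[client2set( last )];
    --g_clients.m_count;
}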
void numbers(void)
{
    int now = glutGet(GLUT_ELAPSED_TIME);
    int elapsedMilliseconds = now - lastFrameTime;
    float elapsedTime = elapsedMilliseconds / 1000.0f;
    lastFrameTime = now;

    float h = elapsedTime;
    client_update(tclient, h);

    while (enet_host_service(enet_client, &event, 10) > 0 && client_connection(tclient))
    {
        switch (event.type)
        {
            case ENET_EVENT_TYPE_RECEIVE:
                client_process_packets(tclient, &event);
                break;

            case ENET_EVENT_TYPE_DISCONNECT:
                client_disconnect(tclient);
                break;

            default:
                break;
        }
    }
}
int smsa_client_operation( uint32_t op, unsigned char *block )
{
    // Things needed when receiving packet
    int16_t ret = 0;        // Return 0 or -1
    int blkbytes;           // How many bytes left
    uint32_t rop;           // Return operation

    if ( SMSA_OPCODE(op) == 0x0 )       // If mount
    {
        if ((server_socket = client_connect()) == -1)
        {
            return -1;
        }
    }

    if ( send_packet( server_socket, op, 0, block ) == -1 )
    {
        return( -1 );
    }

    /*if ( wait_read(server_socket) == -1 )
    {
        return -1;
    }*/

    if ( receive_packet( server_socket, &rop, &ret, &blkbytes, block ) == -1 )
    {
        return( -1 );
    }

    // Now check the op code
    if ( op != rop )
    {
        return( -1 );
    }

    if ( SMSA_OPCODE(op) == 0x1 )       // If unmount
    {
        if (client_disconnect() == -1)
        {
            return -1;
        }
    }

    return ret;
}
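In the SMSA-style client above, client_connect() and client_disconnect() are thin socket wrappers whose bodies are not shown. A minimal, purely hypothetical sketch of the disconnect side, assuming server_socket is the same global file descriptor used by smsa_client_operation():

/* Hypothetical sketch: tear down the global connection used by
 * smsa_client_operation(). The real project may do additional cleanup.
 * Assumes <unistd.h> for close(). */
static int smsa_client_disconnect_sketch( void )
{
    if ( server_socket == -1 )
    {
        return -1;              // nothing to close
    }
    if ( close( server_socket ) == -1 )
    {
        return -1;              // report failure like the other helpers
    }
    server_socket = -1;         // mark the connection as closed
    return 0;
}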
static bool cmd_getscript_continue(struct client_command_context *cmd)
{
	struct client *client = cmd->client;
	struct cmd_getscript_context *ctx = cmd->context;
	off_t ret;

	ret = o_stream_send_istream(client->output, ctx->script_stream);
	if (ret < 0) {
		sieve_storage_set_critical(ctx->storage,
			"o_stream_send_istream(%s) failed: %m",
			sieve_script_filename(ctx->script));
		ctx->failed = TRUE;
		return cmd_getscript_finish(ctx);
	}

	ctx->script_offset += ret;

	if (ctx->script_offset != ctx->script_size && !ctx->failed) {
		/* unfinished */
		if (!i_stream_have_bytes_left(ctx->script_stream)) {
			/* Input stream gave less data than expected */
			sieve_storage_set_critical(ctx->storage,
				"GETSCRIPT for SCRIPT %s got too little data: "
				"%"PRIuUOFF_T" vs %"PRIuUOFF_T,
				sieve_script_name(ctx->script),
				ctx->script_offset, ctx->script_size);
			client_disconnect(ctx->client, "GETSCRIPT failed");
			ctx->failed = TRUE;
			return cmd_getscript_finish(ctx);
		}
		return FALSE;
	}
	return cmd_getscript_finish(ctx);
}
void client_destroy(struct client *client, const char *prefix,
		    const char *reason)
{
	if (client->destroyed)
		return;
	client->destroyed = TRUE;

	client_disconnect(client, prefix, reason);

	submission_client_count--;
	DLLIST_REMOVE(&submission_clients, client);

	if (client->proxy_conn != NULL)
		smtp_client_connection_close(&client->proxy_conn);

	if (client->anvil_sent) {
		master_service_anvil_send(master_service, t_strconcat(
			"DISCONNECT\t", my_pid, "\tsubmission/",
			mail_user_get_anvil_userip_ident(client->user),
			"\n", NULL));
	}

	if (client->urlauth_ctx != NULL)
		imap_urlauth_deinit(&client->urlauth_ctx);

	mail_user_unref(&client->user);
	mail_storage_service_user_unref(&client->service_user);

	client_state_reset(client);
	i_free(client->session_id);
	i_free(client);

	master_service_client_connection_destroyed(master_service);
	submission_refresh_proctitle();
}
int main(int argc, char* argv[])
{
	tlog_init(TLOG_MODE_STDERR, TLOG_INFO, NULL);

	if (argc < 3)
	{
		TL_INFO("%s <ip> <output>", argv[0]);
		return 0;
	}

	rcp_connect(argv[1]);
	start_message_manager();

	client_register(RCP_USER_LEVEL_LIVE, "", RCP_REGISTRATION_TYPE_NORMAL, RCP_ENCRYPTION_MODE_MD5);

	rcp_coder_list encoders, decoders;
	get_coder_list(RCP_CODER_ENCODER, RCP_MEDIA_TYPE_VIDEO, &encoders, 1);
	TL_DEBUG("***");
	for (int i=0; i<encoders.count; i++)
		TL_DEBUG("%x %x %x %x %x", encoders.coder[i].number, encoders.coder[i].caps, encoders.coder[i].current_cap, encoders.coder[i].param_caps, encoders.coder[i].current_param);
	TL_DEBUG("***");

	get_coder_list(RCP_CODER_DECODER, RCP_MEDIA_TYPE_VIDEO, &decoders, 1);
	TL_DEBUG("***");
	for (int i=0; i<decoders.count; i++)
		TL_DEBUG("%x %x %x %x %x", decoders.coder[i].number, decoders.coder[i].caps, decoders.coder[i].current_cap, decoders.coder[i].param_caps, decoders.coder[i].current_param);
	TL_DEBUG("***");

	rcp_session session;
	memset(&session, 0, sizeof(rcp_session));

	unsigned short udp_port = stream_connect_udp(&session);
	TL_DEBUG("udp port = %d", udp_port);

	rcp_media_descriptor desc = {
		RCP_MEP_UDP, 1, 1, 0, udp_port, 1, 1,
		RCP_VIDEO_CODING_H264, RCP_VIDEO_RESOLUTION_4CIF
	};

	client_connect(&session, RCP_CONNECTION_METHOD_GET, RCP_MEDIA_TYPE_VIDEO, 0, &desc);

	pthread_create(&thread, NULL, keep_alive_thread, &session);

	rtp_merge_desc mdesc;
	rtp_init(RTP_PAYLOAD_TYPE_H264, 1, &mdesc);

	signal(SIGTERM, term_handler);

	FILE* out = fopen(argv[2], "wb");

	while (!end)
	{
		/*
		int num = recvfrom(con.stream_socket, buffer, 1500, 0, (struct sockaddr*)&si_remote, &slen);
		rtp_push_frame(buffer, num, &mdesc);
		*/
		if (rtp_recv(session.stream_socket, &mdesc) == 0)
		{
			if (rtp_pop_frame(&mdesc) == 0)
				fwrite(mdesc.data, mdesc.frame_lenght, 1, out);
		}

		//char cmd[100];
		//sprintf(cmd, "kill %d", res);
		//system(cmd);
		//return 0;
	}

	fclose(out);

	pthread_cancel(thread);

	client_disconnect(&session);
	client_unregister();
	stop_message_manager();

	return 0;
}