static void FUNCTION_ATTRIBUTE http_exit(HTTP_RESULT http_result)
{
    http_flag = 0;
    PRINTF("http_result:%d\n", http_result);
    free_req();

    if (HTTP_TIMEOUT == http_result) {
        PRINTF("http time out!\n");
        free_http_buf();
        free_conn();
        user_cb(NULL);
    } else {
        os_timer_disarm(&timeout_timer);
    }

    if (DNS_FAIL == http_result) {
        free_http_buf();
        user_cb(NULL);
    } else if (HTTP_ERR == http_result) {
        free_http_buf();
        free_conn();
        user_cb(NULL);
    }

    if (HTTP_OK == http_result) {
        free_conn();

        const char *version = "HTTP/1.1 ";
        if (os_strncmp(http_buf->buffer, version, os_strlen(version)) != 0) {
            PRINTF("Invalid version in %s\n", http_buf->buffer);
            return;
        }

        int http_status = atoi(http_buf->buffer + os_strlen(version)); /* status code, e.g. 200 */
        char *body = (char *)os_strstr(http_buf->buffer, "\r\n\r\n") + 4;

        if (user_cb != NULL) { /* Callback is optional. */
            user_cb(body);
        }
        free_http_buf();
    }
}
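/*
 * A minimal sketch of how the timeout path above might be driven, assuming an
 * ESP8266-style os_timer callback armed elsewhere with os_timer_setfn() and
 * os_timer_arm(); the callback name is illustrative, not from the source.
 */
static void FUNCTION_ATTRIBUTE http_timeout_cb(void *arg)
{
    (void)arg;
    http_exit(HTTP_TIMEOUT); /* frees buffers, drops the connection, reports NULL to user_cb */
}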
static struct fuse_conn *new_conn(void)
{
    struct fuse_conn *fc;

    fc = kmalloc(sizeof(*fc), GFP_KERNEL);
    if (fc != NULL) {
        int i;

        memset(fc, 0, sizeof(*fc));
        init_waitqueue_head(&fc->waitq);
        INIT_LIST_HEAD(&fc->pending);
        INIT_LIST_HEAD(&fc->processing);
        INIT_LIST_HEAD(&fc->unused_list);
        INIT_LIST_HEAD(&fc->background);
        sema_init(&fc->outstanding_sem, 0);
        init_rwsem(&fc->sbput_sem);
        for (i = 0; i < FUSE_MAX_OUTSTANDING; i++) {
            struct fuse_req *req = fuse_request_alloc();
            if (!req) {
                free_conn(fc);
                return NULL;
            }
            list_add(&req->list, &fc->unused_list);
        }
#ifdef KERNEL_2_6_6_PLUS
        fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        fc->bdi.unplug_io_fn = default_unplug_io_fn;
#endif
        fc->reqctr = 0;
    }
    return fc;
}
void close_and_free(EV_P_ struct conn *conn)
{
    if (conn != NULL) {
        ev_io_stop(EV_A_ &conn->send_ctx->io);
        ev_io_stop(EV_A_ &conn->recv_ctx->io);
        if (debug)
            printf("close fd %d\n", conn->fd);
        if (conn->type == 0) {
            ev_timer_stop(EV_A_ &conn->recv_ctx->watcher);
        }
        if (conn->type == 1) {
            ev_timer_stop(EV_A_ &conn->send_ctx->watcher);
        }
        close(conn->fd);
        free_conn(conn);
    }
}
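/*
 * A minimal sketch of a libev timeout handler that could drive the cleanup
 * above, assuming the watcher's data pointer was set to the owning struct
 * conn when the timer was started; the callback name and the use of w->data
 * are assumptions, not from the source.
 */
static void conn_timeout_cb(EV_P_ ev_timer *w, int revents)
{
    struct conn *conn = (struct conn *)w->data;
    (void)revents;
    close_and_free(EV_A_ conn); /* stops the io/timer watchers, closes the fd, frees conn */
}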
/**
 * \brief Create an NVMf fabric connection from the given parameters and
 * schedule it on a reactor thread.
 *
 * \code
 * # identify reactor where the new connection's work item will be scheduled
 * reactor = nvmf_allocate_reactor()
 * schedule fabric connection work item on reactor
 * \endcode
 */
int spdk_nvmf_startup_conn(struct spdk_nvmf_conn *conn)
{
    int lcore;
    struct spdk_nvmf_conn *admin_conn;
    uint64_t nvmf_session_core = spdk_app_get_core_mask();

    /*
     * If starting an I/O connection, determine the core allocated to the
     * admin queue in order to request the core mask. We cannot assume the
     * NVMf session has been created yet at the time of fabric connection
     * setup, so rely on the fabric function to locate the matching
     * controller session.
     */
    if (conn->type == CONN_TYPE_IOQ && conn->cntlid != 0) {
        admin_conn = spdk_find_nvmf_conn_by_cntlid(conn->cntlid);
        if (admin_conn != NULL) {
            SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Located admin conn session core %d\n",
                          admin_conn->poller.lcore);
            nvmf_session_core = 1ULL << admin_conn->poller.lcore;
        }
    }

    lcore = nvmf_allocate_reactor(nvmf_session_core);
    if (lcore < 0) {
        SPDK_ERRLOG("Unable to find core to launch connection.\n");
        goto err0;
    }

    conn->state = CONN_STATE_RUNNING;
    SPDK_NOTICELOG("Launching nvmf connection[qid=%d] on core: %d\n",
                   conn->qid, lcore);
    conn->poller.fn = spdk_nvmf_conn_do_work;
    conn->poller.arg = conn;

    rte_atomic32_inc(&g_num_connections[lcore]);
    spdk_poller_register(&conn->poller, lcore, NULL);

    return 0;

err0:
    free_conn(conn);
    return -1;
}
/** Remove a connection object from the list of maintained
 * connections.
 * \param c the object to clean up.
 */
void delete_conn(struct conn *c)
{
    struct conn *curr, *nxt;

    for (curr = connections; curr; curr = nxt) {
        nxt = curr->next;
        if (curr == c) {
            if (curr->prev) {
                curr->prev->next = nxt;
                if (nxt)
                    nxt->prev = curr->prev;
            } else {
                connections = nxt;
                if (connections)
                    connections->prev = NULL;
            }
            free_conn(c);
            break;
        }
    }
}
static struct fuse_conn *get_conn(struct file *file, struct super_block *sb)
{
    struct fuse_conn *fc;

    if (file->f_op != &fuse_dev_operations)
        return ERR_PTR(-EINVAL);
    fc = new_conn();
    if (fc == NULL)
        return ERR_PTR(-ENOMEM);
    spin_lock(&fuse_lock);
    if (file->private_data) {
        free_conn(fc);
        fc = ERR_PTR(-EINVAL);
    } else {
        file->private_data = fc;
        *get_fuse_conn_super_p(sb) = fc;
        fc->mounted = 1;
        fc->connected = 1;
        fc->count = 2;
    }
    spin_unlock(&fuse_lock);
    return fc;
}
static void _conn_destruct(spdk_event_t event)
{
    struct spdk_nvmf_conn *conn = spdk_event_get_arg1(event);

    /*
     * Notify the NVMf library of the fabric connection going away.
     * If this is the AQ connection then set state for the other
     * connections to abort.
     */
    nvmf_disconnect((void *)conn, conn->sess);

    if (conn->type == CONN_TYPE_AQ) {
        SPDK_TRACELOG(SPDK_TRACE_DEBUG, "AQ connection destruct, trigger session closure\n");
        /* Trigger all I/O connections to shut down */
        conn->state = CONN_STATE_FABRIC_DISCONNECT;
    }

    nvmf_rdma_conn_cleanup(conn);

    pthread_mutex_lock(&g_conns_mutex);
    free_conn(conn);
    pthread_mutex_unlock(&g_conns_mutex);
}
// Acquires information, initiates a connect and initialises a new connection
// object. Returns NULL if anything fails, a pointer to the object otherwise.
tproxy_conn_t *add_tcp_connection(int efd, struct tailhead *conn_list,
                                  int local_fd, uint16_t listen_port)
{
    struct sockaddr_storage orig_dst;
    tproxy_conn_t *conn;
    int remote_fd;
    struct epoll_event ev;

    if (get_org_dstaddr(local_fd, &orig_dst)) {
        fprintf(stderr, "Could not get local address\n");
        close(local_fd);
        return NULL;
    }

    if (check_local_ip((struct sockaddr *)&orig_dst) == 1 &&
        saport((struct sockaddr *)&orig_dst) == listen_port) {
        fprintf(stderr, "Dropping connection to local address to the same port to avoid loop\n");
        close(local_fd);
        return NULL;
    }

    if ((remote_fd = connect_remote(&orig_dst)) == 0) {
        fprintf(stderr, "Failed to connect\n");
        close(remote_fd);
        close(local_fd);
        return NULL;
    }

    // Create connection object and fill in information
    if ((conn = (tproxy_conn_t *)malloc(sizeof(tproxy_conn_t))) == NULL) {
        fprintf(stderr, "Could not allocate memory for connection\n");
        close(remote_fd);
        close(local_fd);
        return NULL;
    }

    memset(conn, 0, sizeof(tproxy_conn_t));
    conn->state = CONN_AVAILABLE;
    conn->remote_fd = remote_fd;
    conn->local_fd = local_fd;

    if (pipe(conn->splice_pipe) != 0) {
        fprintf(stderr, "Could not create the required pipe\n");
        free_conn(conn);
        return NULL;
    }

    // remote_fd is connecting. Non-blocking connects are signaled as done by
    // the socket being marked as ready for writing.
    memset(&ev, 0, sizeof(ev));
    ev.events = EPOLLIN | EPOLLOUT;
    ev.data.ptr = (void *)conn;

    if (epoll_ctl(efd, EPOLL_CTL_ADD, remote_fd, &ev) == -1) {
        perror("epoll_ctl (remote_fd)");
        free_conn(conn);
        return NULL;
    }

    // The local socket can be closed while waiting for the connection attempt.
    // I need to detect this when waiting for connect() to complete. However, I
    // don't want to get EPOLLIN events, as I don't want to receive any data
    // before the remote connection is established.
    ev.events = EPOLLRDHUP;

    if (epoll_ctl(efd, EPOLL_CTL_ADD, local_fd, &ev) == -1) {
        perror("epoll_ctl (local_fd)");
        free_conn(conn);
        return NULL;
    } else {
        TAILQ_INSERT_HEAD(conn_list, conn, conn_ptrs);
        return conn;
    }
}
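/*
 * A minimal sketch of how the event loop might confirm that the non-blocking
 * connect above completed once remote_fd reports EPOLLOUT, using the standard
 * getsockopt(SO_ERROR) check; this helper is illustrative, not taken from the
 * source.
 */
static int remote_connect_finished(tproxy_conn_t *conn)
{
    int err = 0;
    socklen_t len = sizeof(err);

    if (getsockopt(conn->remote_fd, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
        return -1;
    return err == 0 ? 0 : -1; /* 0: connected, -1: connect() failed */
}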
/* Must be called with the fuse lock held */
void fuse_release_conn(struct fuse_conn *fc)
{
    fc->count--;
    if (!fc->count)
        free_conn(fc);
}
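/*
 * A minimal usage sketch, assuming the same fuse_lock spinlock used in
 * get_conn() above and the fc->count reference-counting convention; the
 * helper name is illustrative, not from the source.
 */
static void put_fuse_conn(struct fuse_conn *fc)
{
    spin_lock(&fuse_lock);
    fuse_release_conn(fc); /* drops one reference; frees the conn on the last put */
    spin_unlock(&fuse_lock);
}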
int main(int argc, char *argv[])
{
    // prepare curl
    char url[] = "https://hotel.psc.edu:8086/";
    int ssl_verify = 1;

    libinflux_init();
    influxConn *hostA = create_conn(url, "test", "dbuser", "TcitoPsb", ssl_verify);
    CURLcode res;
    set_callback(hostA, dataCallback);

    // parse arguments
    printf("%s:\n", argv[0]);
    while ((argc > 1) && (argv[1][0] == '-')) {
        switch (argv[1][1]) {
        case 's': // set ssl peer verification. On: 1, Off: 0
            if (argv[1][3] == '0')
                ssl_verify = 0;
            else if (argv[1][3] == '1')
                ssl_verify = 1;
            // arguments were consumed
            argv += 2;
            argc -= 2;
            break;
        case 'q': // query
            hostA->ssl = ssl_verify;
            res = influxQuery(hostA, &argv[1][3]);
            if (res != CURLE_OK) {
                fprintf(stderr, "curl_easy_perform() failed: %s\n",
                        curl_easy_strerror(res));
            }
            // arguments were consumed
            argv += 2;
            argc -= 2;
            break;
        case 'w': // write
            hostA->ssl = ssl_verify;
            res = influxWrite(hostA, &argv[1][3]);
            if (res != CURLE_OK) {
                fprintf(stderr, "curl_easy_perform() failed: %s\n",
                        curl_easy_strerror(res));
            }
            // arguments were consumed
            argv += 2;
            argc -= 2;
            break;
        case 't': // test/check connection
            hostA->ssl = ssl_verify;
            res = influxCheck(hostA);
            if (res != CURLE_OK)
                fprintf(stderr, "curl_easy_perform() failed: %s\n",
                        curl_easy_strerror(res));
            // arguments were consumed
            argv += 1;
            argc -= 1;
            break;
        case 'd': // enable debug
            set_debug(true);
            // arguments were consumed
            argv += 1;
            argc -= 1;
            break;
        default:
            printf("Invalid argument: %s\n", argv[1]);
            usage();
        }
    }

    free_conn(hostA);
    libinflux_cleanup();
    return 0;
}
CAMLprim value PQfinish_stub(value v_conn)
{
    free_conn(v_conn);
    return Val_unit;
}