/** send the TCP queries and print answers */ static void send_em(const char* svr, int udp, int usessl, int noanswer, int num, char** qs) { sldns_buffer* buf = sldns_buffer_new(65553); int fd = open_svr(svr, udp); int i; SSL_CTX* ctx = NULL; SSL* ssl = NULL; if(!buf) fatal_exit("out of memory"); if(usessl) { ctx = connect_sslctx_create(NULL, NULL, NULL); if(!ctx) fatal_exit("cannot create ssl ctx"); ssl = outgoing_ssl_fd(ctx, fd); if(!ssl) fatal_exit("cannot create ssl"); while(1) { int r; ERR_clear_error(); if( (r=SSL_do_handshake(ssl)) == 1) break; r = SSL_get_error(ssl, r); if(r != SSL_ERROR_WANT_READ && r != SSL_ERROR_WANT_WRITE) { log_crypto_err("could not ssl_handshake"); exit(1); } } if(1) { X509* x = SSL_get_peer_certificate(ssl); if(!x) printf("SSL: no peer certificate\n"); else { X509_print_fp(stdout, x); X509_free(x); } } } for(i=0; i<num; i+=3) { printf("\nNext query is %s %s %s\n", qs[i], qs[i+1], qs[i+2]); write_q(fd, udp, ssl, buf, (uint16_t)get_random(), qs[i], qs[i+1], qs[i+2]); /* print at least one result */ if(!noanswer) recv_one(fd, udp, ssl, buf); } if(usessl) { SSL_shutdown(ssl); SSL_free(ssl); SSL_CTX_free(ctx); } #ifndef USE_WINSOCK close(fd); #else closesocket(fd); #endif sldns_buffer_free(buf); printf("orderly exit\n"); }
static int hsi_ch_net_write(int chno, void *data, int len) { /* Non blocking write */ void *buf = NULL; static struct x_data *d = NULL; int n = 0; int flag = 1; #ifdef XMD_TX_MULTI_PACKET if (d && hsi_channels[chno].write_queued == HSI_TRUE) { if (d->being_used == HSI_FALSE && (d->size + len) < HSI_LARGE_BLOCK_SIZE) { #if MCM_DBG_LOG printk("\nmcm: adding in the queued buffer for ch %d\n",chno); #endif buf = d->buf + d->size; d->size += len; flag = 0; } else flag = 1; } #endif if (flag) { #ifdef XMD_TX_MULTI_PACKET buf = hsi_mem_alloc(HSI_LARGE_BLOCK_SIZE); #else buf = hsi_mem_alloc(len); #endif flag = 1; } if (!buf || !data) return -ENOMEM; memcpy(buf, data, len); if (flag) { d = NULL; n = write_q(&hsi_channels[chno].tx_q, buf, len, &d); #if MCM_DBG_LOG printk("\nmcm: n = %d\n",n); #endif if (n == 0) { #if MCM_DBG_ERR_LOG printk("\nmcm: Dropping the packet as channel %d is busy writing already queued data\n",chno); #endif hsi_mem_free(buf); PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work); queue_work(hsi_write_wq, &hsi_channels[chno].write_work); } else if (n == 1) { PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work); queue_work(hsi_write_wq, &hsi_channels[chno].write_work); } } return 0; }
/*
 * Low-level HSI event callback: dispatches buffer allocate/free, write
 * completion and read completion events for channel `chno`.
 *
 * NOTE(review): `chno` is unsigned, so `chno >= 0` is always true; and
 * `chno <= MAX_HSI_CHANNELS` admits index MAX_HSI_CHANNELS itself — if
 * MAX_HSI_CHANNELS is the array length of hsi_channels[] this is an
 * off-by-one out-of-bounds access. Confirm against the array definition.
 */
void hsi_ch_cb(unsigned int chno, int result, int event, void* arg)
{
	ll_rx_tx_data *data = (ll_rx_tx_data *) arg;

	/* Reject invalid or unused channels before touching channel state. */
	if (!(chno <= MAX_HSI_CHANNELS && chno >= 0) ||
		hsi_channels[chno].state == HSI_CH_NOT_USED) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Wrong channel number or channel not used\n");
#endif
		return;
	}

	switch(event) {
	case HSI_LL_EV_ALLOC_MEM:	/* peer asks for a receive buffer */
		{
#if MCM_DBG_LOG
		printk("\nmcm: Allocating read memory of size %d to channel %d \n", data->size, chno);
#endif
		/* MODEM can't handle NAK so we allocate memory and drop the
		 * packet after recieving from MODEM */
#if 0
		/* disabled: refusing allocation for not-yet-open channels
		 * would NAK the modem, which it cannot handle (see above) */
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so not allocating memory\n");
#endif
			data->buffer = NULL;
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
#endif
		/* NOTE(review): allocation failure (NULL) is not handled
		 * here — presumably the lower layer copes; confirm. */
		data->buffer = (char *)hsi_mem_alloc(data->size);
		}
		break;
	case HSI_LL_EV_FREE_MEM:	/* peer releases a receive buffer */
		{
#if MCM_DBG_LOG
		printk("\nmcm: Freeing memory for channel %d, ptr = 0x%p \n",chno,data->buffer);
#endif
		/* Only free if the channel is (still) open; the lock guards
		 * the state check against a concurrent open/close. */
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so cant free mem\n");
#endif
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
		hsi_mem_free(data->buffer);
		}
		break;
	case HSI_LL_EV_RESET_MEM:
		/* if event is break, handle it somehow. */
		break;
	/* if event is modem powered on, wake up the event. */
	/* xmd_boot_cb(); TBD from DLP */
	case HSI_LL_EV_WRITE_COMPLETE:
		{
#if MCM_DBG_LOG
		printk("\nmcm:unlocking mutex for ch: %d\n",chno);
#endif
		/* Release the writer blocked in write_wait, then free the
		 * transmitted buffer. */
		hsi_channels[chno].write_happening = HSI_FALSE; /* spinlock protection for write_happening... TBD */
		wake_up(&hsi_channels[chno].write_wait);
		hsi_mem_free(data->buffer);
#if MCM_DBG_LOG
		printk("\nmcm: write complete cb, ch %d\n",chno);
#endif
		}
		break;
	case HSI_LL_EV_READ_COMPLETE:
		/* data arrived: queue it and schedule the read workqueue to
		 * push it to the upper layers */
		{
		int n = 0;
#if MCM_DBG_LOG
		printk("\nmcm: Read complete... size %d, channel %d, ptr = 0x%p \n", data->size, chno,data->buffer);
#endif
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			/* channel closed meanwhile: drop the packet */
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel %d not yet opened so dropping the packet\n",chno);
#endif
			hsi_mem_free(data->buffer);
			break;
		}
		/* enqueue under the lock so state and queue stay coherent */
		n = write_q(&hsi_channels[chno].rx_q, data->buffer, data->size, NULL);
		spin_unlock_bh(&hsi_channels[chno].lock);
		if (n == 0) {
			/* rx queue full: drop, but still kick the worker so
			 * it drains the backlog */
#if MCM_DBG_ERR_LOG
			printk("\nmcm: Dropping the packet as channel %d is busy sending already read data\n",chno);
#endif
			hsi_mem_free(data->buffer);
			PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work);
			queue_work(hsi_read_wq, &hsi_channels[chno].read_work);
		} else if (n == 1) {
			/* first element in an empty queue: mark a read in
			 * progress and start the worker */
			if (hsi_channels[chno].read_happening == HSI_FALSE) {
				hsi_channels[chno].read_happening = HSI_TRUE; /* spinlock protection for read_happening... TBD */
			}
			PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work);
			queue_work(hsi_read_wq, &hsi_channels[chno].read_work);
		}
		/* if n > 1, no need to schdule the wq again. */
		}
		break;
	default:
		/* Wrong event. */
		break;
	}
}
/*
 * Non-blocking write of a network packet onto a channel's TX queue.
 * Returns 0 on success, -EINVAL for NULL data, -ENOMEM when no buffer
 * is available, -EBUSY when the TX queue is full (packet dropped and
 * tx_blocked raised so the caller can throttle).
 *
 * NOTE(review): `d` is function-static, so concurrent callers on
 * different channels would share it — presumably calls are serialized
 * per driver design; confirm.
 */
static int hsi_ch_net_write(int chno, void *data, int len)
{
	/* Non blocking write */
	void *buf = NULL;
	static struct x_data *d = NULL;	/* queued block reused across calls */
	int n = 0;
	int flag = 1;	/* 1 = enqueue fresh block, 0 = appended into d */
	int ret = 0;

	/* Validate input before touching any queue state. */
	if (!data) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: data is NULL.\n");
#endif
		return -EINVAL;
	}

#ifdef XMD_TX_MULTI_PACKET
	/* Coalesce into the still-queued block when it has room and is not
	 * currently being consumed by the writer. */
	if (d && hsi_channels[chno].write_queued == HSI_TRUE) {
		if (d->being_used == HSI_FALSE &&
		    (d->size + len) < HSI_MEM_LARGE_BLOCK_SIZE) {
#if MCM_DBG_LOG
			printk("\nmcm: Adding in the queued buffer for ch %d\n",chno);
#endif
			buf = d->buf + d->size;
			d->size += len;
			flag = 0;
		} else {
			flag = 1;
		}
	}
#endif
	if (flag) {
#ifdef XMD_TX_MULTI_PACKET
		buf = hsi_mem_alloc(HSI_MEM_LARGE_BLOCK_SIZE);
#else
		buf = hsi_mem_alloc(len);
#endif
		flag = 1;
	}

	if (!buf) {
		/* Allocation failed: flag the channel blocked so the upper
		 * layer stops pushing packets. */
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Failed to alloc memory So Cannot transfer packet.\n");
#endif
#if 1
		hsi_channels[chno].tx_blocked = 1;
#endif
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	if (flag) {
		/* Enqueue the fresh block; write_q hands back the queued
		 * descriptor in d for later coalescing. */
		d = NULL;
		n = write_q(&hsi_channels[chno].tx_q, buf, len, &d);
		if (n != 0) {
			hsi_channels[chno].pending_tx_msgs++;
		}
#if MCM_DBG_LOG
		printk("\nmcm: n = %d\n",n);
#endif
		if (n == 0) {
			/* Queue full: drop the packet, mark blocked, and make
			 * sure a writer work item is running to drain. */
#if MCM_DBG_LOG
			printk("\nmcm: rmnet TX queue is full for channel %d, So cannot transfer this packet.\n",chno);
#endif
			hsi_channels[chno].tx_blocked = 1;
			hsi_mem_free(buf);
#if 1
			if (hsi_channels[chno].write_queued == HSI_TRUE) {
#if MCM_DBG_LOG
				printk("\nmcm: hsi_ch_net_write wq already in progress\n");
#endif
			} else {
				PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
				queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			}
#endif
			ret = -EBUSY;
		} else if (n == 1) {
			/* First element in an empty queue: kick the writer. */
			PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
			queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			ret = 0;
		}
	}
	return ret;
}
/*
 * Low-level HSI event callback: dispatches buffer allocate/free, write
 * completion and read completion events for channel `chno`, with
 * flow-control bookkeeping (pending_rx_msgs) for channels >= 13.
 *
 * NOTE(review): `chno` is unsigned, so `chno >= 0` is always true; and
 * `chno <= MAX_HSI_CHANNELS` admits index MAX_HSI_CHANNELS itself — if
 * MAX_HSI_CHANNELS is the array length of hsi_channels[] this is an
 * off-by-one out-of-bounds access. Confirm against the array definition.
 */
void hsi_ch_cb(unsigned int chno, int result, int event, void* arg)
{
	ll_rx_tx_data *data = (ll_rx_tx_data *) arg;

	/* Reject invalid or unused channels before touching channel state. */
	if (!(chno <= MAX_HSI_CHANNELS && chno >= 0) ||
		hsi_channels[chno].state == HSI_CH_NOT_USED) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Wrong channel number or channel not used\n");
#endif
		return;
	}

	switch(event) {
	case HSI_LL_EV_ALLOC_MEM:	/* peer asks for a receive buffer */
		{
		/* Channels >= 13 (data channels, presumably rmnet — confirm)
		 * are flow controlled by pending_rx_msgs. */
		if(chno >= 13) {
			if (hsi_channels[chno].pending_rx_msgs >= NUM_X_BUF) {
				/* Queue full: refuse the buffer. Without the
				 * retry workqueue this NAKs the CP; with it,
				 * remember the size and retry later. */
				data->buffer = 0;
#if !defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
#if MCM_DBG_ERR_LOG
				printk("\nmcm: Channel %d RX queue is full so sending NAK to CP\n", chno);
#endif
#else
				hsi_channels[chno].pending_rx_size = data->size;
				hsi_channels[chno].rx_blocked = 1;
#endif
				break;
			} else {
				hsi_channels[chno].pending_rx_msgs++;
			}
		}
#if MCM_DBG_LOG
		printk("\nmcm: Allocating read memory of size %d to channel %d \n", data->size, chno);
#endif
		/* MODEM can't handle NAK so we allocate memory and drop the
		 * packet after recieving from MODEM */
#if 0
		/* disabled: refusing allocation for not-yet-open channels
		 * would NAK the modem, which it cannot handle (see above) */
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so not allocating memory\n");
#endif
			data->buffer = NULL;
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
#endif
		data->buffer = (char *)hsi_mem_alloc(data->size);
#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
		/* Allocation failed: stash the size and retry from the
		 * dedicated buffer-retry workqueue. */
		if(data->buffer == NULL) {
			hsi_channels[chno].pending_rx_size = data->size;
			PREPARE_WORK(&hsi_channels[chno].buf_retry_work,
						hsi_buf_retry_work);
			queue_work(hsi_buf_retry_wq,
						&hsi_channels[chno].buf_retry_work);
		}
#endif
		}
		break;
	case HSI_LL_EV_FREE_MEM:	/* peer releases a receive buffer */
		{
#if MCM_DBG_LOG
		printk("\nmcm: Freeing memory for channel %d, ptr = 0x%p \n",
				chno,data->buffer);
#endif
		/* Only free if the channel is (still) open; the lock guards
		 * the state check against a concurrent open/close. */
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so cant free mem\n");
#endif
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
		hsi_mem_free(data->buffer);
		}
		break;
	case HSI_LL_EV_RESET_MEM:
		/* if event is break, handle it somehow. */
		break;
	case HSI_LL_EV_WRITE_COMPLETE:
		{
#if MCM_DBG_LOG
		printk("\nmcm:unlocking mutex for ch: %d\n",chno);
#endif
/* Uplink throughput fix: free the TX buffer BEFORE waking the writer
 * (the #if 0 block below is the old, post-wakeup location). */
#if 1
		hsi_mem_free(data->buffer);
#endif
		hsi_channels[chno].write_happening = HSI_FALSE;
		wake_up(&hsi_channels[chno].write_wait);
/* old free location, kept for reference */
#if 0
		hsi_mem_free(data->buffer);
#endif
#if MCM_DBG_LOG
		printk("\nmcm: write complete cb, ch %d\n",chno);
#endif
		}
		break;
	case HSI_LL_EV_READ_COMPLETE:
		/* data arrived: queue it and schedule the read workqueue to
		 * push it to the upper layers */
		{
		int n = 0;
#if MCM_DBG_LOG
		printk("\nmcm: Read complete... size %d, channel %d, ptr = 0x%p \n", data->size, chno,data->buffer);
#endif
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			/* Channel closed meanwhile: undo the flow-control
			 * count, drop the packet, and resume a blocked RX
			 * allocation if the retry workqueue is enabled. */
			if(chno >= 13) {
				hsi_channels[chno].pending_rx_msgs--;
			}
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel %d not yet opened so dropping the packet\n",chno);
#endif
			hsi_mem_free(data->buffer);
#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
			if(hsi_channels[chno].rx_blocked) {
				hsi_channels[chno].rx_blocked = 0;
				spin_lock_bh(&hsi_channels[chno].lock);
				hsi_channels[chno].pending_rx_msgs++;
				spin_unlock_bh(&hsi_channels[chno].lock);
				PREPARE_WORK(&hsi_channels[chno].buf_retry_work,
							hsi_buf_retry_work);
				queue_work(hsi_buf_retry_wq,
							&hsi_channels[chno].buf_retry_work);
			}
#endif
			break;
		}
		/* enqueue under the lock so state and queue stay coherent */
		n = write_q(&hsi_channels[chno].rx_q, data->buffer, data->size, NULL);
		spin_unlock_bh(&hsi_channels[chno].lock);
		if (n == 0) {
			/* rx queue full: drop, but still kick the worker so
			 * it drains the backlog */
#if MCM_DBG_ERR_LOG
			printk("\nmcm: Dropping the packet as channel %d is busy sending already read data\n",chno);
#endif
			hsi_mem_free(data->buffer);
			/* Schedule work Q to send data to upper layers */
			PREPARE_WORK(&hsi_channels[chno].read_work,
						hsi_read_work);
			queue_work(hsi_read_wq,
						&hsi_channels[chno].read_work);
		} else if (n == 1) {
			/* first element in an empty queue: mark a read in
			 * progress and start the worker */
			if (hsi_channels[chno].read_happening == HSI_FALSE) {
				hsi_channels[chno].read_happening = HSI_TRUE;
			}
			PREPARE_WORK(&hsi_channels[chno].read_work,
						hsi_read_work);
			queue_work(hsi_read_wq,
						&hsi_channels[chno].read_work);
		}
		/* if n > 1, no need to schdule the wq again. */
		}
		break;
	default:
		/* Wrong event. */
#if MCM_DBG_ERR_LOG
		printk("\nmcm:Wrong event.ch %d event %d", chno, event);
#endif
		break;
	}
}
// TODO make it more general void Foam::calcTypes::fieldMap2d::calc ( const argList& args, const Time& runTime, const fvMesh& mesh ) { // coordinates of points on the surface of the fracture walls //memInfo mf; // print what is calculated if(processingType_ == "all") Info << "Processing all fields..." << endl; else if(processingType_ == "surf") Info << "Calculating csurf and h" << endl; else if(processingType_ == "int") Info << "Calculating C, qx and qy" << endl; else if(processingType_ == "h") Info << "Processing the aperture of the fracture..." << endl; else if(processingType_ == "U") Info << "Processing the flux qx and qy..." << endl; else if(processingType_ == "p") FatalError<<"p processing is not implemented yet"<<nl<<exit(FatalError); else if(processingType_ == "C") Info << "Processing the concentration field..." << endl; else if(processingType_ == "csurf") Info << "Calculating concentration on the surface" << endl; else if(processingType_ == "temp") Info << "Running temporary function..." 
<< endl; else if(processingType_ == "ccAll") Info << "Processing all fields for concentric cylinder geometry" << endl; else FatalError<<"Unable to process "<<processingType_<<nl<<exit(FatalError); for(int cI=0; cI<totNumLoop; cI++) { curNum = cI; curBlock = thisTimeSize * curNum; sizeAA = thisTimeSize; if(cI==totNumLoop-1){ sizeAA = N1M1 - (totNumLoop-1)*thisTimeSize; } Info << "Find the points on the surface"<<nl; pointsXYonsurface.clear(); pointsXYonsurface.setSize( expNI * sizeAA ); if(geometry=="flat") { build_surface_points( mesh ); } else if(geometry=="concentricCylinders") { build_surface_pointsCC( mesh ); } Info << "build_surface_points done"<<nl; fileName current_dissolCalc_dir; current_dissolCalc_dir = "postProcessing/dissolCalc" / runTime.timeName(); if ( !isDir(current_dissolCalc_dir) ) mkDir(current_dissolCalc_dir); if(processingType_ == "all"){ write_all(mesh, runTime); } else if(processingType_ == "surf"){ write_surf(mesh, runTime); } else if(processingType_ == "int"){ write_int(mesh, runTime); } else if(processingType_ == "h"){ write_h(mesh, runTime); } else if(processingType_ == "U"){ write_q(mesh, runTime); } else if(processingType_ == "C"){ write_Ccup(mesh, runTime); } else if(processingType_ == "csurf"){ write_csurf(mesh, runTime); } else if(processingType_ == "temp"){ write_temp(mesh, runTime); } else if(processingType_ == "ccAll"){ write_ccAll(mesh, runTime); } else{ FatalError<<"Unable to process "<<processingType_<<nl<<nl<<exit(FatalError); } } }