static int hsi_ch_net_write(int chno, void *data, int len) { /* Non blocking write */ void *buf = NULL; static struct x_data *d = NULL; int n = 0; int flag = 1; #ifdef XMD_TX_MULTI_PACKET if (d && hsi_channels[chno].write_queued == HSI_TRUE) { if (d->being_used == HSI_FALSE && (d->size + len) < HSI_LARGE_BLOCK_SIZE) { #if MCM_DBG_LOG printk("\nmcm: adding in the queued buffer for ch %d\n",chno); #endif buf = d->buf + d->size; d->size += len; flag = 0; } else flag = 1; } #endif if (flag) { #ifdef XMD_TX_MULTI_PACKET buf = hsi_mem_alloc(HSI_LARGE_BLOCK_SIZE); #else buf = hsi_mem_alloc(len); #endif flag = 1; } if (!buf || !data) return -ENOMEM; memcpy(buf, data, len); if (flag) { d = NULL; n = write_q(&hsi_channels[chno].tx_q, buf, len, &d); #if MCM_DBG_LOG printk("\nmcm: n = %d\n",n); #endif if (n == 0) { #if MCM_DBG_ERR_LOG printk("\nmcm: Dropping the packet as channel %d is busy writing already queued data\n",chno); #endif hsi_mem_free(buf); PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work); queue_work(hsi_write_wq, &hsi_channels[chno].write_work); } else if (n == 1) { PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work); queue_work(hsi_write_wq, &hsi_channels[chno].write_work); } } return 0; }
static int hsi_ch_tty_write(int chno, void *data, int len) { void *buf = NULL; int err; buf = hsi_mem_alloc(len); if (!buf) return -ENOMEM; //printk("\n data = %c, len = %d\n",((char*)data)[0], len); memcpy(buf, data, len); hsi_channels[chno].write_happening = HSI_TRUE; err = hsi_ll_write(chno, (unsigned char *)buf, len); if (err < 0) { #if MCM_DBG_ERR_LOG printk("\nmcm: hsi_ll_write failed\n"); #endif hsi_channels[chno].write_happening = HSI_FALSE; } if (hsi_channels[chno].write_happening == HSI_TRUE) { //spinlock may be used for write_happening #if MCM_DBG_LOG printk("\nmcm:locking mutex for ch: %d\n",chno); #endif wait_event(hsi_channels[chno].write_wait, hsi_channels[chno].write_happening == HSI_FALSE); } return err; }
/*
 * hsi_ch_tty_write() - Blocking TTY write on channel @chno (feature-gated
 * variant).  Copies the payload into an HSI buffer, hands it to the low
 * layer, and on success waits for completion — either unbounded
 * (CIQ channel on ATT targets) or via hsi_ch_write_timeout().
 *
 * NOTE(review): this chunk is truncated in the visible source (it ends
 * inside the #else branch of MIPI_HSI_TTY_WRITE_TIMEOUT_FEATURE); the
 * remainder of the function is outside this view.
 */
static int hsi_ch_tty_write(int chno, void *data, int len)
{
	void *buf = NULL;
	int err;

	buf = hsi_mem_alloc(len);
	if (!buf) {
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	hsi_channels[chno].write_happening = HSI_TRUE;

	err = hsi_ll_write(chno, (unsigned char *)buf, len);
	if (err < 0) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: hsi_ll_write(...) failed. err=%d\n",err);
#endif
		/* Should free in error case */
#if 1
		hsi_mem_free(buf);
#endif
		hsi_channels[chno].write_happening = HSI_FALSE;
	}
#if defined(MIPI_HSI_TTY_WRITE_TIMEOUT_FEATURE)
	else {
#if MCM_DBG_LOG
		printk("\nmcm:locking mutex start for ch: %d\n", chno);
#endif
#if defined (TARGET_CARRIER_ATT) && !defined (MIPI_HSI_CHECK_CP_RX_INFO)
		/* CIQ channel waits without a timeout on ATT targets. */
		if(chno == XMD_TTY_CIQ_CHANNEL) {
			wait_event(hsi_channels[chno].write_wait,
				hsi_channels[chno].write_happening == HSI_FALSE);
#if MCM_DBG_LOG
			printk("\nmcm:locking mutex end for ch: %d\n", chno);
#endif
		} else
#endif
		{
			/* All other channels use the bounded wait helper. */
			err = hsi_ch_write_timeout(chno, buf);
#if MCM_DBG_LOG
			if (err < 0)
				printk("\nmcm:hsi_ch_write_timeout ret %d for ch: %d\n", err, chno);
#endif
		}
#if defined(HSI_MCM_NOTIFY_TO_CHARGER)
		/* Report the write outcome to the charger subsystem. */
		hsi_ch_notify_to_charger(err);
#endif
	}
#else
	else {
/*
 * hsi_ch_tty_write() - Blocking TTY write on channel @chno (RIL-recovery
 * variant).  Copies the payload into an HSI buffer, hands it to the low
 * layer, then waits — with a timeout — for the write-complete callback to
 * clear write_happening.
 *
 * Returns the hsi_ll_write() result, -ENOMEM on allocation failure, or -9
 * when the completion wait times out (RIL recovery patch).
 */
static int hsi_ch_tty_write(int chno, void *data, int len)
{
	void *buf = NULL;
	int err;

	buf = hsi_mem_alloc(len);
	if (!buf) {
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	hsi_channels[chno].write_happening = HSI_TRUE;

	err = hsi_ll_write(chno, (unsigned char *)buf, len);
	if (err < 0) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: hsi_ll_write(...) failed. err=%d\n",err);
#endif
		/*
		 * NOTE(review): unlike the sibling variants in this file,
		 * this path does not hsi_mem_free(buf) — presumably a leak
		 * unless the low layer owns the buffer on failure; confirm
		 * against hsi_ll_write()'s contract.
		 */
		hsi_channels[chno].write_happening = HSI_FALSE;
	} else {
#if MCM_DBG_LOG
		printk("\nmcm:locking mutex for ch: %d\n",chno);
#endif
		/* */
#if 0 //ORGINAL
		wait_event (hsi_channels[chno].write_wait,
			hsi_channels[chno].write_happening == HSI_FALSE);
#else //RIL Recovery fail patch
		/*
		 * Bounded wait: if the CP never acknowledges the write, give
		 * up after HSI_MCM_TTY_TX_TIMEOUT_VAL jiffies instead of
		 * hanging the writer forever.
		 */
		wait_event_timeout (hsi_channels[chno].write_wait,
			hsi_channels[chno].write_happening == HSI_FALSE,
			HSI_MCM_TTY_TX_TIMEOUT_VAL);
		if (hsi_channels[chno].write_happening == HSI_TRUE){
			/* Timed out: completion never arrived. */
			printk("mcm:[RIL Recovery]hsi_ch_tty_write failed. err= -9\n");
			err = -9;
		}
#endif
		/* */
	}
	return err;
}
void hsi_ch_cb(unsigned int chno, int result, int event, void* arg) { ll_rx_tx_data *data = (ll_rx_tx_data *) arg; if (!(chno <= MAX_HSI_CHANNELS && chno >= 0) || hsi_channels[chno].state == HSI_CH_NOT_USED) { #if MCM_DBG_ERR_LOG printk("\nmcm: Wrong channel number or channel not used\n"); #endif return; } switch(event) { case HSI_LL_EV_ALLOC_MEM: // if event is allocate read mem, { #if MCM_DBG_LOG printk("\nmcm: Allocating read memory of size %d to channel %d \n", data->size, chno); #endif /* MODEM can't handle NAK so we allocate memory and drop the packet after recieving from MODEM */ #if 0 spin_lock_bh(&hsi_channels[chno].lock); if (hsi_channels[chno].state == HSI_CH_FREE) { spin_unlock_bh(&hsi_channels[chno].lock); #if MCM_DBG_ERR_LOG printk("\nmcm: channel not yet opened so not allocating memory\n"); #endif data->buffer = NULL; break; } spin_unlock_bh(&hsi_channels[chno].lock); #endif data->buffer = (char *)hsi_mem_alloc(data->size); } break; case HSI_LL_EV_FREE_MEM: // if event is free read mem, { #if MCM_DBG_LOG printk("\nmcm: Freeing memory for channel %d, ptr = 0x%p \n",chno,data->buffer); #endif spin_lock_bh(&hsi_channels[chno].lock); if (hsi_channels[chno].state == HSI_CH_FREE) { spin_unlock_bh(&hsi_channels[chno].lock); #if MCM_DBG_ERR_LOG printk("\nmcm: channel not yet opened so cant free mem\n"); #endif break; } spin_unlock_bh(&hsi_channels[chno].lock); hsi_mem_free(data->buffer); } break; case HSI_LL_EV_RESET_MEM: // if event is break, handle it somehow. break; // if event is modem powered on, wake up the event. //xmd_boot_cb(); TBD from DLP case HSI_LL_EV_WRITE_COMPLETE: { #if MCM_DBG_LOG printk("\nmcm:unlocking mutex for ch: %d\n",chno); #endif hsi_channels[chno].write_happening = HSI_FALSE; //spinlock protection for write_happening... 
TBD wake_up(&hsi_channels[chno].write_wait); hsi_mem_free(data->buffer); #if MCM_DBG_LOG printk("\nmcm: write complete cb, ch %d\n",chno); #endif } break; case HSI_LL_EV_READ_COMPLETE: // if event is send data, schedule work q to send data to upper layers { int n = 0; #if MCM_DBG_LOG printk("\nmcm: Read complete... size %d, channel %d, ptr = 0x%p \n", data->size, chno,data->buffer); #endif spin_lock_bh(&hsi_channels[chno].lock); if (hsi_channels[chno].state == HSI_CH_FREE) { spin_unlock_bh(&hsi_channels[chno].lock); #if MCM_DBG_ERR_LOG printk("\nmcm: channel %d not yet opened so dropping the packet\n",chno); #endif hsi_mem_free(data->buffer); break; } n = write_q(&hsi_channels[chno].rx_q, data->buffer, data->size, NULL); spin_unlock_bh(&hsi_channels[chno].lock); if (n == 0) { #if MCM_DBG_ERR_LOG printk("\nmcm: Dropping the packet as channel %d is busy sending already read data\n",chno); #endif hsi_mem_free(data->buffer); PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work); queue_work(hsi_read_wq, &hsi_channels[chno].read_work); } else if (n == 1) { if (hsi_channels[chno].read_happening == HSI_FALSE) { hsi_channels[chno].read_happening = HSI_TRUE; //spinlock protection for read_happening... TBD } PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work); queue_work(hsi_read_wq, &hsi_channels[chno].read_work); } // if n > 1, no need to schdule the wq again. } break; default: //Wrong event. break; } }
/*
 * hsi_ch_net_write() - Non-blocking write of a network (rmnet) packet on
 * channel @chno.  Either coalesces the payload into the buffer already
 * queued for the channel (XMD_TX_MULTI_PACKET) or allocates a fresh
 * buffer and queues it, scheduling the write workqueue as needed.
 *
 * Returns 0 on success, -EINVAL for a NULL payload, -ENOMEM on
 * allocation failure, -EBUSY when the TX queue is full (packet dropped,
 * tx_blocked flagged so flow control can resume the net queue later).
 */
static int hsi_ch_net_write(int chno, void *data, int len)
{
	/* Non blocking write */
	void *buf = NULL;
	static struct x_data *d = NULL;
	int n = 0;
	int flag = 1;
	int ret = 0;

	/* Reject a NULL payload before touching any channel state. */
	if (!data) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: data is NULL.\n");
#endif
		return -EINVAL;
	}

#ifdef XMD_TX_MULTI_PACKET
	/*
	 * Append to the already-queued buffer when it is not being drained
	 * and still has room; flag == 0 means no new queue entry is needed.
	 */
	if (d && hsi_channels[chno].write_queued == HSI_TRUE) {
		if (d->being_used == HSI_FALSE &&
		    (d->size + len) < HSI_MEM_LARGE_BLOCK_SIZE) {
#if MCM_DBG_LOG
			printk("\nmcm: Adding in the queued buffer for ch %d\n",chno);
#endif
			buf = d->buf + d->size;
			d->size += len;
			flag = 0;
		} else {
			flag = 1;
		}
	}
#endif
	if (flag) {
		/* No coalescing possible: allocate a new transfer buffer. */
#ifdef XMD_TX_MULTI_PACKET
		buf = hsi_mem_alloc(HSI_MEM_LARGE_BLOCK_SIZE);
#else
		buf = hsi_mem_alloc(len);
#endif
		flag = 1;
	}

	if (!buf) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Failed to alloc memory So Cannot transfer packet.\n");
#endif
		/* Flag TX as blocked so the upper layer stops the queue. */
#if 1
		hsi_channels[chno].tx_blocked = 1;
#endif
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	if (flag) {
		d = NULL;
		n = write_q(&hsi_channels[chno].tx_q, buf, len, &d);
		if (n != 0) {
			/* Successfully queued: account for the pending TX. */
			hsi_channels[chno].pending_tx_msgs++;
		}
#if MCM_DBG_LOG
		printk("\nmcm: n = %d\n",n);
#endif
		if (n == 0) {
			/* TX queue full: drop the packet, flag flow control. */
#if MCM_DBG_LOG
			printk("\nmcm: rmnet TX queue is full for channel %d, So cannot transfer this packet.\n",chno);
#endif
			hsi_channels[chno].tx_blocked = 1;
			hsi_mem_free(buf);
			/* Only kick the writer if it is not already running. */
#if 1
			if (hsi_channels[chno].write_queued == HSI_TRUE) {
#if MCM_DBG_LOG
				printk("\nmcm: hsi_ch_net_write wq already in progress\n");
#endif
			} else {
				PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
				queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			}
#endif
			ret = -EBUSY;
		} else if (n == 1) {
			/* First entry in an empty queue: start the writer. */
			PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
			queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			ret = 0;
		}
	}
	return ret;
}
void hsi_ch_cb(unsigned int chno, int result, int event, void* arg) { ll_rx_tx_data *data = (ll_rx_tx_data *) arg; if (!(chno <= MAX_HSI_CHANNELS && chno >= 0) || hsi_channels[chno].state == HSI_CH_NOT_USED) { #if MCM_DBG_ERR_LOG printk("\nmcm: Wrong channel number or channel not used\n"); #endif return; } switch(event) { case HSI_LL_EV_ALLOC_MEM: { if(chno >= 13) { if (hsi_channels[chno].pending_rx_msgs >= NUM_X_BUF) { data->buffer = 0; #if !defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ) #if MCM_DBG_ERR_LOG printk("\nmcm: Channel %d RX queue is full so sending NAK to CP\n", chno); #endif #else hsi_channels[chno].pending_rx_size = data->size; hsi_channels[chno].rx_blocked = 1; #endif break; } else { hsi_channels[chno].pending_rx_msgs++; } } #if MCM_DBG_LOG printk("\nmcm: Allocating read memory of size %d to channel %d \n", data->size, chno); #endif /* MODEM can't handle NAK so we allocate memory and drop the packet after recieving from MODEM */ #if 0 spin_lock_bh(&hsi_channels[chno].lock); if (hsi_channels[chno].state == HSI_CH_FREE) { spin_unlock_bh(&hsi_channels[chno].lock); #if MCM_DBG_ERR_LOG printk("\nmcm: channel not yet opened so not allocating memory\n"); #endif data->buffer = NULL; break; } spin_unlock_bh(&hsi_channels[chno].lock); #endif data->buffer = (char *)hsi_mem_alloc(data->size); #if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ) if(data->buffer == NULL) { hsi_channels[chno].pending_rx_size = data->size; PREPARE_WORK(&hsi_channels[chno].buf_retry_work, hsi_buf_retry_work); queue_work(hsi_buf_retry_wq, &hsi_channels[chno].buf_retry_work); } #endif } break; case HSI_LL_EV_FREE_MEM: { #if MCM_DBG_LOG printk("\nmcm: Freeing memory for channel %d, ptr = 0x%p \n", chno,data->buffer); #endif spin_lock_bh(&hsi_channels[chno].lock); if (hsi_channels[chno].state == HSI_CH_FREE) { spin_unlock_bh(&hsi_channels[chno].lock); #if MCM_DBG_ERR_LOG printk("\nmcm: channel not yet opened so cant free mem\n"); #endif break; } spin_unlock_bh(&hsi_channels[chno].lock); 
hsi_mem_free(data->buffer); } break; case HSI_LL_EV_RESET_MEM: /* if event is break, handle it somehow. */ break; case HSI_LL_EV_WRITE_COMPLETE: { #if MCM_DBG_LOG printk("\nmcm:unlocking mutex for ch: %d\n",chno); #endif // /* Uplink Throughput issue */ #if 1 hsi_mem_free(data->buffer); #endif // hsi_channels[chno].write_happening = HSI_FALSE; wake_up(&hsi_channels[chno].write_wait); // /* Uplink Throughput issue */ #if 0 hsi_mem_free(data->buffer); #endif // #if MCM_DBG_LOG printk("\nmcm: write complete cb, ch %d\n",chno); #endif } break; case HSI_LL_EV_READ_COMPLETE: { int n = 0; #if MCM_DBG_LOG printk("\nmcm: Read complete... size %d, channel %d, ptr = 0x%p \n", data->size, chno,data->buffer); #endif spin_lock_bh(&hsi_channels[chno].lock); if (hsi_channels[chno].state == HSI_CH_FREE) { if(chno >= 13) { hsi_channels[chno].pending_rx_msgs--; } spin_unlock_bh(&hsi_channels[chno].lock); #if MCM_DBG_ERR_LOG printk("\nmcm: channel %d not yet opened so dropping the packet\n",chno); #endif hsi_mem_free(data->buffer); #if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ) if(hsi_channels[chno].rx_blocked) { hsi_channels[chno].rx_blocked = 0; spin_lock_bh(&hsi_channels[chno].lock); hsi_channels[chno].pending_rx_msgs++; spin_unlock_bh(&hsi_channels[chno].lock); PREPARE_WORK(&hsi_channels[chno].buf_retry_work, hsi_buf_retry_work); queue_work(hsi_buf_retry_wq, &hsi_channels[chno].buf_retry_work); } #endif break; } n = write_q(&hsi_channels[chno].rx_q, data->buffer, data->size, NULL); spin_unlock_bh(&hsi_channels[chno].lock); if (n == 0) { #if MCM_DBG_ERR_LOG printk("\nmcm: Dropping the packet as channel %d is busy sending already read data\n",chno); #endif hsi_mem_free(data->buffer); /* Schedule work Q to send data to upper layers */ PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work); queue_work(hsi_read_wq, &hsi_channels[chno].read_work); } else if (n == 1) { if (hsi_channels[chno].read_happening == HSI_FALSE) { hsi_channels[chno].read_happening = HSI_TRUE; } 
PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work); queue_work(hsi_read_wq, &hsi_channels[chno].read_work); } /* if n > 1, no need to schdule the wq again. */ } break; default: /* Wrong event. */ #if MCM_DBG_ERR_LOG printk("\nmcm:Wrong event.ch %d event %d", chno, event); #endif break; } }
/*
 * hsi_ch_write_timeout() - Wait (with retries) for a pending write on
 * @chno to complete.
 *
 * Repeatedly sleeps up to @timeout for write_happening to clear.  On each
 * timeout it probes the low layer via hsi_ll_check_channel():
 *   -EPERM  -> reset the write channel, free @buf, return -EREMOTEIO
 *   -EACCES -> clear write_happening, return -EREMOTEIO
 *   -EBUSY  -> keep looping (logs every HSI_WRITE_TIMEOUT_TRY passes)
 *   other   -> treat as recovered, return 0
 * An MCM error-recovery state aborts the wait with -EREMOTEIO.
 *
 * NOTE(review): ownership of @buf is split — it is freed here only on the
 * -EPERM path; other paths leave it to the write-complete callback.
 */
static int hsi_ch_write_timeout(int chno, void *buf, long timeout)
{
	int err = 0, rc = 0, i =0;

#if 1
	/* Unbounded retry loop; i only drives periodic EBUSY logging. */
	while (1) {
		i++;
#else
	for (i = 0; i < HSI_WRITE_TIMEOUT_TRY; i++) {
#endif
		rc = wait_event_timeout(hsi_channels[chno].write_wait,
			hsi_channels[chno].write_happening == HSI_FALSE,
			timeout);
		/* Abort immediately if MCM entered error recovery. */
		if( hsi_mcm_state == HSI_MCM_STATE_ERR_RECOVERY) {
#if MCM_DBG_LOG
			printk("\nmcm:locking 1st mutex end for ch: %d\n", chno);
#endif
			err= -EREMOTEIO;
			break;
		}
		if (rc == 0) {
			/* Timed out: ask the low layer what state the
			   channel is in. */
			int ret = hsi_ll_check_channel(chno);
			if(ret == -EPERM) {
				err = -EREMOTEIO;
				hsi_channels[chno].write_happening = HSI_FALSE;
				hsi_ll_reset_write_channel(chno);
				hsi_mem_free(buf);
#if MCM_DBG_ERR_LOG
				printk("\nmcm: hsi_ll_check_channel - hsi_ch_write_timeout(...) failed\n");
#endif
				break;
			} else if(ret == -EACCES){
				hsi_channels[chno].write_happening = HSI_FALSE;
				err = -EREMOTEIO;
#if MCM_DBG_ERR_LOG
				printk("\nmcm:unlocking 1st mutex end for ch: %d\n", chno);
#endif
				break;
			}
#if 0
			else if(ret == -EAGAIN){
				rc = wait_event_timeout(hsi_channels[chno].write_wait,
					hsi_channels[chno].write_happening == HSI_FALSE,
					timeout);
				if (rc == 0) {
					err = -EREMOTEIO;
					hsi_ll_reset_write_channel(chno);
					hsi_mem_free(buf);
#if MCM_DBG_ERR_LOG
					printk("\nmcm: hsi_ll_check_channel - 2st hsi_ch_write_timeout(...) failed\n");
#endif
				} else {
					err = 0;
#if MCM_DBG_LOG
					printk("\nmcm:unlocking 2st mutex end for ch: %d\n", chno);
#endif
				}
			}
#endif
			else if(ret == -EBUSY){
#if 1
				/* Still busy: log periodically and retry. */
#if MCM_DBG_ERR_LOG
				if(i % HSI_WRITE_TIMEOUT_TRY == 0)
					printk("\nmcm:hsi_ch_write_timeout - EBUSY for ch: %d\n", chno);
#endif
				err = -EBUSY;
#else
				wait_event(hsi_channels[chno].write_wait,
					hsi_channels[chno].write_happening == HSI_FALSE);
				err = 0;
#if MCM_DBG_LOG
				printk("\nmcm:unlocking 3st mutex end for ch: %d\n", chno);
#endif
#endif
			} else {
				/* Channel recovered between timeout and probe. */
				err = 0;
#if MCM_DBG_LOG
				printk("\nmcm:unlocking 4st mutex end for ch: %d\n", chno);
#endif
				break;
			}
		} else {
			/* Completion arrived within the timeout. */
			err = 0;
#if MCM_DBG_LOG
			printk("\nmcm:unlocking 4st mutex end for ch: %d\n", chno);
#endif
			break;
		}
	}

#if 0
	if(err == -EBUSY) {
		err = 0;
		hsi_channels[chno].write_happening = HSI_FALSE;
		hsi_ll_reset_write_channel(chno);
		hsi_mem_free(buf);
#if MCM_DBG_ERR_LOG
		printk("\nmcm: hsi_ll_check_channel - EBUSY(...) failed\n");
#endif
	}
#endif
	return err;
}
#endif

/*
 * hsi_ch_tty_write() - Blocking TTY write on channel @chno (variant that
 * uses hsi_ch_write_timeout() with HSI_WRITE_TTY_TIMEOUT).
 *
 * NOTE(review): this chunk is truncated in the visible source (it ends
 * inside the #else branch of MIPI_HSI_TTY_WRITE_TIMEOUT_FEATURE); the
 * remainder of the function is outside this view.
 */
static int hsi_ch_tty_write(int chno, void *data, int len)
{
	void *buf = NULL;
	int err;

	buf = hsi_mem_alloc(len);
	if (!buf) {
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	hsi_channels[chno].write_happening = HSI_TRUE;

	err = hsi_ll_write(chno, (unsigned char *)buf, len);
	if (err < 0) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: hsi_ll_write(...) failed. err=%d\n",err);
#endif
		/* Should free in error case */
#if 1
		hsi_mem_free(buf);
#endif
		hsi_channels[chno].write_happening = HSI_FALSE;
	}
#if defined(MIPI_HSI_TTY_WRITE_TIMEOUT_FEATURE)
	else {
#if MCM_DBG_LOG
		printk("\nmcm:locking mutex start for ch: %d\n", chno);
#endif
#if defined (TARGET_CARRIER_ATT) && !defined (MIPI_HSI_CHECK_CP_RX_INFO)
		/* CIQ channel waits without a timeout on ATT targets. */
		if(chno == XMD_TTY_CIQ_CHANNEL) {
			wait_event(hsi_channels[chno].write_wait,
				hsi_channels[chno].write_happening == HSI_FALSE);
#if MCM_DBG_LOG
			printk("\nmcm:locking mutex end for ch: %d\n", chno);
#endif
		} else
#endif
		{
			/* Bounded wait for the write-complete callback. */
			err = hsi_ch_write_timeout(chno, buf, HSI_WRITE_TTY_TIMEOUT);
#if MCM_DBG_LOG
			if (err < 0)
				printk("\nmcm:hsi_ch_write_timeout ret %d for ch: %d\n", err, chno);
#endif
		}
#if defined(HSI_MCM_NOTIFY_TO_CHARGER)
		/* Report the write outcome to the charger subsystem. */
		hsi_ch_notify_to_charger(err);
#endif
	}
#else
	else {