Example No. 1
static int hsi_ch_net_write(int chno, void *data, int len)
{
	/* Non-blocking write */
	void *buf = NULL;
	static struct x_data *d = NULL;
	int n = 0;
	int flag = 1;

	if (!data)
		return -EINVAL;	/* reject NULL payloads before the queued-buffer bookkeeping below */

#ifdef XMD_TX_MULTI_PACKET
	if (d && hsi_channels[chno].write_queued == HSI_TRUE) {
		if (d->being_used == HSI_FALSE && (d->size + len) < HSI_LARGE_BLOCK_SIZE) {
#if MCM_DBG_LOG
			printk("\nmcm: adding in the queued buffer for ch %d\n",chno);
#endif
			buf = d->buf + d->size;
			d->size += len;
			flag = 0;
		} else
			flag = 1;
	}
#endif
	if (flag) {
#ifdef XMD_TX_MULTI_PACKET
		buf = hsi_mem_alloc(HSI_LARGE_BLOCK_SIZE);
#else
		buf = hsi_mem_alloc(len);
#endif
		flag = 1;
	}
 
	if (!buf)
		return -ENOMEM;

	memcpy(buf, data, len);
  
	if (flag) {
		d = NULL;
		n = write_q(&hsi_channels[chno].tx_q, buf, len, &d);
#if MCM_DBG_LOG
		printk("\nmcm: n = %d\n",n);
#endif
		if (n == 0) {
#if MCM_DBG_ERR_LOG
			printk("\nmcm: Dropping the packet as channel %d is busy writing already queued data\n",chno);
#endif
			hsi_mem_free(buf);
			PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
			queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
		} else if (n == 1) {
			PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
			queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
		}
	}
  
	return 0;
}
Example No. 2
/* 
 * This function services keyboard interrupts. It reads the relevant
 * information from the keyboard and then puts the non time critical
 * part into the work queue. This will be run when the kernel considers it safe.
 */
irqreturn_t irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * These variables are static because they need to be
	 * accessible (through pointers) to the bottom-half routine.
	 */
	static int initialised = 0;
	static unsigned char scancode;
	static struct work_struct task;
	unsigned char status;

	/* 
	 * Read keyboard status
	 */
	status = inb(0x64);
	scancode = inb(0x60);

	if (initialised == 0) {
		INIT_WORK(&task, got_char, &scancode);
		initialised = 1;
	} else {
		PREPARE_WORK(&task, got_char, &scancode);
	}

	queue_work(my_workqueue, &task);

	return IRQ_HANDLED;
}
Example No. 3
static void smsspi_int_handler(void *context)
{
	struct _spi_device_st *spi_device = (struct _spi_device_st *) context;

	PDEBUG("interrupt\n");
	PREPARE_WORK(&spi_work_queue, (void *)spi_worker_thread);
	schedule_work(&spi_work_queue);
}
Example No. 4
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct pdp_info *dev = (struct pdp_info *)net->ml_priv;

#ifdef USE_LOOPBACK_PING
	int ret;
	unsigned int len;
	struct sk_buff *skb2;
	struct icmphdr *icmph;
	struct iphdr *iph;
#endif

   DPRINTK(2, "BEGIN\n");

#ifdef USE_LOOPBACK_PING
	len = skb->len;
	dev->vn_dev.stats.tx_bytes += len;
	dev->vn_dev.stats.tx_packets++;

	skb2 = alloc_skb(skb->len, GFP_ATOMIC);
	if (skb2 == NULL) {
		DPRINTK(1, "alloc_skb() failed\n");
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	memcpy(skb2->data, skb->data, skb->len);
	skb_put(skb2, skb->len);
	dev_kfree_skb_any(skb);

	icmph = (struct icmphdr *)(skb2->data + sizeof(struct iphdr));
	iph = (struct iphdr *)skb2->data;

	icmph->type = ICMP_ECHOREPLY;	/* type is a single-byte field, so no htons() */

	ret = iph->daddr;
	iph->daddr = iph->saddr;
	iph->saddr = ret;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	skb2->dev = net;
	skb2->protocol = __constant_htons(ETH_P_IP);

	netif_rx(skb2);

	dev->vn_dev.stats.rx_packets++;
	dev->vn_dev.stats.rx_bytes += len;	/* skb was freed above; use the saved length */
#else
	if (vnet_start_xmit_flag != 0) {
		return NETDEV_TX_BUSY;
	}
	vnet_start_xmit_flag = 1;
	workqueue_data = (unsigned long)skb;
	PREPARE_WORK(&dev->vn_dev.xmit_task, vnet_defer_xmit);
	schedule_work(&dev->vn_dev.xmit_task);
	netif_stop_queue(net);
#endif

   DPRINTK(2, "END\n");
	return NETDEV_TX_OK;
}
Example No. 5
/*
 * perform processing on an asynchronous call
 * - on a multiple-thread workqueue this work item may try to run on several
 *   CPUs at the same time
 */
static void afs_process_async_call(struct work_struct *work)
{
	struct afs_call *call =
		container_of(work, struct afs_call, async_work);

	_enter("");

	if (!skb_queue_empty(&call->rx_queue))
		afs_deliver_to_call(call);

	if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
		if (call->wait_mode->async_complete)
			call->wait_mode->async_complete(call->reply,
							call->error);
		call->reply = NULL;

		/* kill the call */
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
		if (call->type->destructor)
			call->type->destructor(call);

		/* we can't just delete the call because the work item may be
		 * queued */
		PREPARE_WORK(&call->async_work, afs_delete_async_call);
		queue_work(afs_async_calls, &call->async_work);
	}

	_leave("");
}
Example No. 6
/*
 * stsc_timer_fct - Timer function synchronizing the synthetic TSC.
 * @data: unused
 *
 * Guarantees at least 1 execution before low word of TSC wraps.
 */
static void stsc_timer_fct(unsigned long data)
{
	PREPARE_WORK(&stsc_work, synthetic_tsc_work);
	schedule_work(&stsc_work);

	mod_timer(&stsc_timer, jiffies + precalc_expire);
}
Example No. 7
static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2800usb_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2800_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2800_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device has multiple filters for control frames
	 * and has a separate filter for PS Poll frames.
	 */
	__set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
	__set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);

	/*
	 * This device requires firmware.
	 */
	__set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
	if (!modparam_nohwcrypt)
		__set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
	__set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);

	setup_timer(&rt2x00dev->txstatus_timer,
		    rt2800usb_tx_sta_fifo_timeout,
		    (unsigned long) rt2x00dev);

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	/*
	 * Overwrite TX done handler
	 */
	PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);

	return 0;
}
Example No. 8
void hsi_read_work(struct work_struct *work)
{
	/* function registered with read work q */
	struct hsi_channel *ch = container_of(work, struct hsi_channel,
						read_work);
	int chno = ch->info->chno;
	struct x_data *data = NULL;

	if (hsi_channels[chno].read_queued == HSI_TRUE) {
#if MCM_DBG_LOG
		printk("\nmcm: read wq already in progress\n");
#endif
		return;
	}

	hsi_channels[chno].read_queued = HSI_TRUE;

	while ((data = read_q(chno, &hsi_channels[chno].rx_q)) != NULL) {
		char *buf = data->buf;
		hsi_channels[chno].curr = data;

		if (hsi_mcm_state != HSI_MCM_STATE_ERR_RECOVERY) {
			hsi_channels[chno].notify(chno);
		} else {
#if MCM_DBG_ERR_RECOVERY_LOG
			printk("\nmcm:Dropping RX packets of channel %d from WQ as error recovery is in progress\n", chno);
#endif
		}

		hsi_mem_free(buf);
		if(chno >= 13) {
#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
			if(hsi_channels[chno].rx_blocked) {
				hsi_channels[chno].rx_blocked = 0;
				spin_lock_bh(&hsi_channels[chno].lock);
				hsi_channels[chno].pending_rx_msgs++;
				spin_unlock_bh(&hsi_channels[chno].lock);
				PREPARE_WORK(&hsi_channels[chno].buf_retry_work, hsi_buf_retry_work);
				queue_work(hsi_buf_retry_wq, &hsi_channels[chno].buf_retry_work);
			}
#endif
			hsi_channels[chno].pending_rx_msgs--;
		}
	}
	hsi_channels[chno].read_queued = HSI_FALSE;
	spin_lock_bh(&hsi_channels[chno].lock);
	hsi_channels[chno].read_happening = HSI_FALSE;
	spin_unlock_bh(&hsi_channels[chno].lock);

	wake_up(&hsi_channels[chno].read_wait);
}
Example No. 9
static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2800usb_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2800_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2800_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device has multiple filters for control frames
	 * and has a separate filter for PS Poll frames.
	 */
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags);
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);

	/*
	 * This device requires firmware.
	 */
	__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
	if (!modparam_nohwcrypt)
		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
	__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
	__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	/*
	 * Overwrite TX done handler
	 */
	PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);

	return 0;
}
Example No. 10
irqreturn_t gpio_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	static int initialised = 0;
	static struct work_struct task;
	
	if (initialised == 0) {
		INIT_WORK(&task, handle_gpio, dev_id);
		initialised = 1;
	} else {
		PREPARE_WORK(&task, handle_gpio, dev_id);
	}

	queue_work(my_workqueue, &task);

	return IRQ_HANDLED;
}
Example No. 11
static irqreturn_t
irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{

    printk(KERN_ALERT "Get some movemnet");
    
    if (!wq_init) {
        INIT_WORK(&mouse_info_ctx.wk, movement_work);
        wq_init = 1;
    } else {
        PREPARE_WORK(&mouse_info_ctx.wk, movement_work);
    }


    queue_work(mouse_info_ctx.wq, &mouse_info_ctx.wk);

    return IRQ_HANDLED;
}
Example No. 12
static void led_virtual_dev(struct leds_dev_data *info)
{
	struct device *sec_led;
	int error = 0;
	mutex_init(&info->led_work_lock);

	INIT_WORK(&info->work_pat_batt_chrg, pm8xxx_led_work_pat_batt_chrg);
	PREPARE_WORK(&info->work_pat_batt_chrg, pm8xxx_led_work_pat_batt_chrg);

	INIT_WORK(&info->work_pat_chrg_err, pm8xxx_led_work_pat_chrg_err);
	PREPARE_WORK(&info->work_pat_chrg_err, pm8xxx_led_work_pat_chrg_err);

	INIT_WORK(&info->work_pat_miss_noti, pm8xxx_led_work_pat_miss_noti);
	PREPARE_WORK(&info->work_pat_miss_noti, pm8xxx_led_work_pat_miss_noti);

	INIT_WORK(&info->work_pat_in_lowbat, pm8xxx_led_work_pat_in_lowbat);
	PREPARE_WORK(&info->work_pat_in_lowbat, pm8xxx_led_work_pat_in_lowbat);

	INIT_WORK(&info->work_pat_full_chrg, pm8xxx_led_work_pat_full_chrg);
	PREPARE_WORK(&info->work_pat_full_chrg, pm8xxx_led_work_pat_full_chrg);

	INIT_WORK(&info->work_pat_powering, pm8xxx_led_work_pat_powering);
	PREPARE_WORK(&info->work_pat_powering, pm8xxx_led_work_pat_powering);

	sec_led = device_create(sec_class, NULL, 0, NULL, "led");
	error = dev_set_drvdata(sec_led, info);
	if (error)
		pr_err("Failed to set sec_led driver data\n");
	error = device_create_file(sec_led, &dev_attr_led_pattern);
	if (error)
		pr_err("Failed to create /sys/class/sec/led/led_pattern\n");
	error = device_create_file(sec_led, &dev_attr_led_lowpower);
	if (error)
		pr_err("Failed to create /sys/class/sec/led/led_lowpower\n");
	error = device_create_file(sec_led, &dev_attr_led_r);
	if (error)
		pr_err("Failed to create /sys/class/sec/led/led_r\n");
	error = device_create_file(sec_led, &dev_attr_led_g);
	if (error)
		pr_err("Failed to create /sys/class/sec/led/led_g\n");
	error = device_create_file(sec_led, &dev_attr_led_b);
	if (error)
		pr_err("Failed to create /sys/class/sec/led/led_b\n");
	error = device_create_file(sec_led, &dev_attr_led_blink);
	if (error)
		pr_err("Failed to create /sys/class/sec/led/led_blink\n");
}
Example No. 13
static int hsi_ch_net_write(int chno, void *data, int len)
{
	/* Non blocking write */
	void *buf = NULL;
	static struct x_data *d = NULL;
	int n = 0;
	int flag = 1;
	int ret = 0;

	if (!data) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: data is NULL.\n");
#endif
		return -EINVAL;
	}

#ifdef XMD_TX_MULTI_PACKET
	if (d && hsi_channels[chno].write_queued == HSI_TRUE) {
		if (d->being_used == HSI_FALSE && (d->size + len) < HSI_MEM_LARGE_BLOCK_SIZE) {
#if MCM_DBG_LOG
			printk("\nmcm: Adding in the queued buffer for ch %d\n",chno);
#endif
			buf = d->buf + d->size;
			d->size += len;
			flag = 0;
		} else {
			flag = 1;
		}
	}
#endif
	if (flag) {
#ifdef XMD_TX_MULTI_PACKET
		buf = hsi_mem_alloc(HSI_MEM_LARGE_BLOCK_SIZE);
#else
		buf = hsi_mem_alloc(len);
#endif
		flag = 1;
	}

	if (!buf) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Failed to alloc memory So Cannot transfer packet.\n");
#endif
#if 1
		hsi_channels[chno].tx_blocked = 1;
#endif
		return -ENOMEM;
	}

	memcpy(buf, data, len);

	if (flag) {
		d = NULL;
		n = write_q(&hsi_channels[chno].tx_q, buf, len, &d);
		if (n != 0) {
			hsi_channels[chno].pending_tx_msgs++;
		}
#if MCM_DBG_LOG
		printk("\nmcm: n = %d\n",n);
#endif
		if (n == 0) {
#if MCM_DBG_LOG
			printk("\nmcm: rmnet TX queue is full for channel %d, So cannot transfer this packet.\n",chno);
#endif
			hsi_channels[chno].tx_blocked = 1;
			hsi_mem_free(buf);
#if 1
			if (hsi_channels[chno].write_queued == HSI_TRUE) {
#if MCM_DBG_LOG
				printk("\nmcm: hsi_ch_net_write wq already in progress\n");
#endif
			} else {
				PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
				queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			}
#endif
			ret = -EBUSY;
		} else if (n == 1) {
			PREPARE_WORK(&hsi_channels[chno].write_work, hsi_write_work);
			queue_work(hsi_write_wq, &hsi_channels[chno].write_work);
			ret = 0;
		}
	}

	return ret;
}
Example No. 14
void hsi_ch_cb(unsigned int chno, int result, int event, void* arg)
{
	ll_rx_tx_data *data = (ll_rx_tx_data *) arg;

	/* chno is unsigned, so only the upper bound needs checking */
	if (chno >= MAX_HSI_CHANNELS ||
		hsi_channels[chno].state == HSI_CH_NOT_USED) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Wrong channel number or channel not used\n");
#endif
		return;
	}

	switch(event) {
	case HSI_LL_EV_ALLOC_MEM: {
		if(chno >= 13) {
			if (hsi_channels[chno].pending_rx_msgs >= NUM_X_BUF) {
				data->buffer = 0;
#if !defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
#if MCM_DBG_ERR_LOG
				printk("\nmcm: Channel %d RX queue is full so sending NAK to CP\n",
						chno);
#endif
#else
				hsi_channels[chno].pending_rx_size = data->size;
				hsi_channels[chno].rx_blocked = 1;
#endif
				break;
			} else {
				hsi_channels[chno].pending_rx_msgs++;
			}
		}

#if MCM_DBG_LOG
		printk("\nmcm: Allocating read memory of size %d to channel %d \n",
					data->size, chno);
#endif
		/* MODEM can't handle NAK so we allocate memory and
		   drop the packet after receiving it from the MODEM */
#if 0
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so not allocating memory\n");
#endif
			data->buffer = NULL;
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
#endif
		data->buffer = (char *)hsi_mem_alloc(data->size);

#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
		if(data->buffer == NULL) {
			hsi_channels[chno].pending_rx_size = data->size;
			PREPARE_WORK(&hsi_channels[chno].buf_retry_work,
						 hsi_buf_retry_work);
			queue_work(hsi_buf_retry_wq,
					   &hsi_channels[chno].buf_retry_work);
		}
#endif
		}
		break;

	case HSI_LL_EV_FREE_MEM: {
#if MCM_DBG_LOG
		printk("\nmcm: Freeing memory for channel %d, ptr = 0x%p \n",
					chno,data->buffer);
#endif
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so cant free mem\n");
#endif
			break;
			}
		spin_unlock_bh(&hsi_channels[chno].lock);
		hsi_mem_free(data->buffer);
		}
		break;

	case HSI_LL_EV_RESET_MEM:
		/* if event is break, handle it somehow. */
		break;

	case HSI_LL_EV_WRITE_COMPLETE: {
#if MCM_DBG_LOG
		printk("\nmcm:unlocking mutex for ch: %d\n",chno);
#endif

		/* Uplink Throughput issue: free the buffer before waking
		 * the writer (it was previously freed after the wake_up). */
		hsi_mem_free(data->buffer);
		hsi_channels[chno].write_happening = HSI_FALSE;
		wake_up(&hsi_channels[chno].write_wait);

#if MCM_DBG_LOG
		printk("\nmcm: write complete cb, ch %d\n",chno);
#endif
		}
		break;

	case HSI_LL_EV_READ_COMPLETE: {
		int n = 0;
#if MCM_DBG_LOG
		printk("\nmcm: Read complete... size %d, channel %d, ptr = 0x%p \n",
					data->size, chno,data->buffer);
#endif
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			if(chno >= 13) {
				hsi_channels[chno].pending_rx_msgs--;
			}
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel %d not yet opened so dropping the packet\n",chno);
#endif
			hsi_mem_free(data->buffer);
#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
			if(hsi_channels[chno].rx_blocked) {
				hsi_channels[chno].rx_blocked = 0;
				spin_lock_bh(&hsi_channels[chno].lock);
				hsi_channels[chno].pending_rx_msgs++;
				spin_unlock_bh(&hsi_channels[chno].lock);
				PREPARE_WORK(&hsi_channels[chno].buf_retry_work, hsi_buf_retry_work);
				queue_work(hsi_buf_retry_wq, &hsi_channels[chno].buf_retry_work);
			}
#endif
			break;
		}

		n = write_q(&hsi_channels[chno].rx_q, data->buffer, data->size, NULL);

		spin_unlock_bh(&hsi_channels[chno].lock);

		if (n == 0) {
#if MCM_DBG_ERR_LOG
			printk("\nmcm: Dropping the packet as channel %d is busy sending already read data\n",chno);
#endif
			hsi_mem_free(data->buffer);
			/* Schedule work Q to send data to upper layers */
			PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work);
			queue_work(hsi_read_wq, &hsi_channels[chno].read_work);
		} else if (n == 1) {
			if (hsi_channels[chno].read_happening == HSI_FALSE) {
				hsi_channels[chno].read_happening = HSI_TRUE;
			}
			PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work);
			queue_work(hsi_read_wq, &hsi_channels[chno].read_work);
		}
		/* if n > 1, no need to schedule the wq again. */
		}
		break;
	default:
		/* Wrong event. */
#if MCM_DBG_ERR_LOG
		printk("\nmcm:Wrong event.ch %d event %d", chno, event);
#endif
		break;
	}
}
Example No. 15
void hsi_ch_cb(unsigned int chno, int result, int event, void* arg) 
{
	ll_rx_tx_data *data = (ll_rx_tx_data *) arg;

	if (chno >= MAX_HSI_CHANNELS || hsi_channels[chno].state == HSI_CH_NOT_USED) {
#if MCM_DBG_ERR_LOG
		printk("\nmcm: Wrong channel number or channel not used\n");
#endif
		return;
	}


	switch(event)
	{
	case HSI_LL_EV_ALLOC_MEM: // if event is allocate read mem, 
	{
#if MCM_DBG_LOG
		printk("\nmcm: Allocating read memory of size %d to channel %d \n", data->size, chno);
#endif
		/* MODEM can't handle NAK so we allocate memory and drop the packet after receiving it from the MODEM */
#if 0
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so not allocating memory\n");
#endif
			data->buffer = NULL;
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
#endif
		data->buffer = (char *)hsi_mem_alloc(data->size);
	}
	break;
  
	case HSI_LL_EV_FREE_MEM: // if event is free read mem,
	{
#if MCM_DBG_LOG
		printk("\nmcm: Freeing memory for channel %d, ptr = 0x%p \n",chno,data->buffer);
#endif
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel not yet opened so cant free mem\n");
#endif
			break;
		}
		spin_unlock_bh(&hsi_channels[chno].lock);
		hsi_mem_free(data->buffer);
	}
	break;
  
	case HSI_LL_EV_RESET_MEM:
	// if event is break, handle it somehow.
	break;
  
	// if event is modem powered on, wake up the event.
	//xmd_boot_cb(); TBD from DLP

	case HSI_LL_EV_WRITE_COMPLETE:
	{
#if MCM_DBG_LOG
		printk("\nmcm:unlocking mutex for ch: %d\n",chno);
#endif
		hsi_channels[chno].write_happening = HSI_FALSE; //spinlock protection for write_happening... TBD
		wake_up(&hsi_channels[chno].write_wait);
		hsi_mem_free(data->buffer);
#if MCM_DBG_LOG
		printk("\nmcm: write complete cb, ch %d\n",chno);
#endif
	}
	break;
  
	case HSI_LL_EV_READ_COMPLETE: // if event is send data, schedule work q to send data to upper layers
	{
		int n = 0;
#if MCM_DBG_LOG
		printk("\nmcm: Read complete... size %d, channel %d, ptr = 0x%p \n", data->size, chno,data->buffer);
#endif
		spin_lock_bh(&hsi_channels[chno].lock);
		if (hsi_channels[chno].state == HSI_CH_FREE) {
			spin_unlock_bh(&hsi_channels[chno].lock);
#if MCM_DBG_ERR_LOG
			printk("\nmcm: channel %d not yet opened so dropping the packet\n",chno);
#endif
			hsi_mem_free(data->buffer);
			break;
		}
    
		n = write_q(&hsi_channels[chno].rx_q, data->buffer, data->size, NULL);

		spin_unlock_bh(&hsi_channels[chno].lock);

		if (n == 0) {
#if MCM_DBG_ERR_LOG
			printk("\nmcm: Dropping the packet as channel %d is busy sending already read data\n",chno);
#endif
			hsi_mem_free(data->buffer);
			PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work);
			queue_work(hsi_read_wq, &hsi_channels[chno].read_work);
		} else if (n == 1) {
			if (hsi_channels[chno].read_happening == HSI_FALSE)
			{
				hsi_channels[chno].read_happening = HSI_TRUE; //spinlock protection for read_happening... TBD
			}
			PREPARE_WORK(&hsi_channels[chno].read_work, hsi_read_work);
			queue_work(hsi_read_wq, &hsi_channels[chno].read_work);
      
		}
		// if n > 1, no need to schedule the wq again.
	}
	break;
	default:
		//Wrong event.
	break;
	}
}
Example No. 16
void xmd_ch_close(int chno)
{
	printk("\nmcm:closing channel %d.\n", chno);

#if defined(CONFIG_MACH_LGE)
	/* LGE builds trigger recovery for several RIL channels,
	   not only XMD_RIL_RECOVERY_CHANNEL. */
	if ((chno == 1) || (chno == 2) || (chno == 3) ||
	    (chno == 4) || (chno == 5) || (chno == 8) || (chno == 11)) {
#else
	if (chno == XMD_RIL_RECOVERY_CHANNEL) {
#endif

#if MCM_DBG_ERR_RECOVERY_LOG
		printk("\nmcm: Ch %d closed so starting Recovery.\n", chno);
#endif
		xmd_dlp_recovery();
	}
	
	if (hsi_channels[chno].read_happening == HSI_TRUE) {
#if MCM_DBG_LOG
		printk("\nmcm:locking read mutex for ch: %d\n",chno);
#endif
		wait_event(hsi_channels[chno].read_wait,
					hsi_channels[chno].read_happening == HSI_FALSE);
	}
	
	hsi_ll_close(chno);
	spin_lock_bh(&hsi_channels[chno].lock);
	hsi_channels[chno].state = HSI_CH_FREE;
	spin_unlock_bh(&hsi_channels[chno].lock);
}

int xmd_ch_open(struct xmd_ch_info* info, void (*notify_cb)(int chno))
{
	int i;
	int size = ARRAY_SIZE(hsi_channels);

	for (i=0; i<size; i++) {
		if (hsi_channels[i].name)
			if (!strcmp(info->name, hsi_channels[i].name)) {
				if (hsi_channels[i].state == HSI_CH_BUSY ||
					hsi_channels[i].state == HSI_CH_NOT_USED) {
#if MCM_DBG_ERR_LOG
					printk("\nmcm:Channel state not suitable %d\n",i);
#endif
					return -EINVAL;
				}

#if defined(CONFIG_MACH_LGE)
				if (((i == 1) || (i == 2) || (i == 3) ||
				     (i == 4) || (i == 5) || (i == 8) || (i == 11)) &&
#else
				if ((i == XMD_RIL_RECOVERY_CHANNEL) &&
#endif
					(hsi_mcm_state == HSI_MCM_STATE_ERR_RECOVERY)) {
#if MCM_DBG_ERR_RECOVERY_LOG
						printk("\nmcm: Recovery completed by chno %d.\n", i);
#endif
					xmd_ch_reinit();
				}

				if (0 != hsi_ll_open(i)) {
#if MCM_DBG_ERR_LOG
					printk("\nmcm:hsi_ll_open failed for channel %d\n",i);
#endif
					return -EINVAL;
				}

				hsi_channels[i].info = info;

				spin_lock_bh(&hsi_channels[i].lock);
				hsi_channels[i].state = HSI_CH_BUSY;
				spin_unlock_bh(&hsi_channels[i].lock);

				hsi_channels[i].notify = notify_cb;
				switch(info->user)
				{
				case XMD_TTY:
					hsi_channels[i].read = hsi_ch_tty_read;
					hsi_channels[i].write = hsi_ch_tty_write;
				break;
				case XMD_NET:
					hsi_channels[i].read = hsi_ch_net_read;
					hsi_channels[i].write = hsi_ch_net_write;
				break;
				default:
#if MCM_DBG_ERR_LOG
					printk("\nmcm:Neither TTY nor NET \n");
#endif
					return -EINVAL;
				}

#if 0 /* ORIGINAL CODE */
				INIT_WORK(&hsi_channels[i].read_work, hsi_read_work);
				INIT_WORK(&hsi_channels[i].write_work, hsi_write_work);
#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
				INIT_WORK(&hsi_channels[i].buf_retry_work, hsi_buf_retry_work);
#endif
#else /* HSI patch for MST test */
				PREPARE_WORK(&hsi_channels[i].read_work, hsi_read_work);
				PREPARE_WORK(&hsi_channels[i].write_work, hsi_write_work);
#if defined (HSI_LL_ENABLE_RX_BUF_RETRY_WQ)
				PREPARE_WORK(&hsi_channels[i].buf_retry_work, hsi_buf_retry_work);
#endif
#endif

				return i;
			}
	}
#if MCM_DBG_ERR_LOG
	printk("\nmcm: Channel name not proper \n");
#endif
	return -EINVAL;
}
Example No. 17
int hea_eq0_alloc(struct rhea_eq0 *eq0, struct hea_adapter *ap)
{
    int rc;
    struct hea_eq_context context_eq;
    struct hea_process process = { 0 };

    if (NULL == eq0) {
        rhea_error("Invalid parameters passed in");
        return -EINVAL;
    }

    rhea_debug("Create EQ0 for hypervisor");

    context_eq.cfg.eqe_count = 16;
    context_eq.cfg.coalesing2_delay = HEA_EQ_COALESING_DELAY_0;

    context_eq.cfg.generate_completion_events =
        HEA_EQ_GEN_COM_EVENT_DISABLE;

    context_eq.cfg.irq_type = HEA_IRQ_COALESING_2;

    /* make sure we create a real EQ0 */
    process.lpar = 0xFF;

    eq0->eq = rhea_eq_create(&process, &context_eq.cfg);
    if (NULL == eq0->eq) {
        rhea_error("Was not able to allocate EQ0");
        return -ENOMEM;
    }

    /* we must have received EQ id 0; anything else is released again */
    if (0 != eq0->eq->id) {
        rhea_error("Was not able to get EQ0");
        rhea_eq_destroy(eq0->eq);
        return -EPERM;
    }

    /* set base information */
    eq0->q.q_begin = (unsigned char *) eq0->eq->q.va;
    eq0->q.qe_size = sizeof(struct hea_eqe);
    eq0->q.qe_count = eq0->eq->q.size / sizeof(struct hea_eqe);

    /* initialise rest */
    heaq_init(&eq0->q);

    eq0->irq_workqueue = create_singlethread_workqueue("EQ0");
    if (NULL == eq0->irq_workqueue) {
        rhea_error("Was not able to allocate workqueue");
        rhea_eq_destroy(eq0->eq);	/* don't leak the EQ on this path */
        return -ENOMEM;
    }

    /* prepare work queue */
    INIT_DELAYED_WORK(&eq0->irq_work, &eq0_scan_eq);
    PREPARE_DELAYED_WORK(&eq0->irq_work, &eq0_scan_eq);

    INIT_WORK(&eq0->timer_work, &hea_eq0_timer_pport_event);
    PREPARE_WORK(&eq0->timer_work, &hea_eq0_timer_pport_event);

    /* timer */
    setup_timer(&eq0->timer,
                hea_eq0_timer_callback, (ulong) eq0);

    rc = mod_timer(&eq0->timer,
                   jiffies +
                   msecs_to_jiffies(CONFIG_POWEREN_RHEA_TIMER_MS));
    if (rc) {
        rhea_error("Error in mod_timer");
        goto out;
    }


    rc = rhea_interrupts_setup(eq0->eq, ap->name, ap->hwirq_base,
                               ap->hwirq_count, eq0_irq_handler, eq0);
    if (rc) {
        rhea_error("Was not able to register interupt "
                   "handler for EQ0");
        goto out;
    }

    spin_lock_init(&eq0->lock);

    return 0;
out:
    if (eq0->eq)
        rhea_eq_destroy(eq0->eq);

    if (eq0->irq_workqueue)
        destroy_workqueue(eq0->irq_workqueue);

    del_timer(&eq0->timer);

    return rc;
}