Example #1
static void verbs_add_device (struct ib_device *dev)
{
	int ret;
	struct ib_qp_init_attr attrs;

	if (ib_dev)
		return;

	/* dirty hack so that ib_dma_map_single does not segfault */
	dev->dma_ops = NULL;
	ib_dev = dev;

	printk (KERN_INFO "IB add device called. Name = %s\n", dev->name);

	ret = ib_query_device (dev, &dev_attr);
	if (ret) {
		printk (KERN_INFO "ib_quer_device failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "IB device caps: max_qp %d, max_mcast_grp: %d, max_pkeys: %d\n",
		dev_attr.max_qp, dev_attr.max_mcast_grp, (int)dev_attr.max_pkeys);

	/* We'll work with the first port only; this is just a sample module.
	 * Note that IB port numbers start at 1, not 0. */
	ret = ib_query_port (dev, 1, &port_attr);
	if (ret) {
		printk (KERN_INFO "ib_query_port failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "Port info: lid: %u, sm_lid: %u, max_msg_size: %u\n",
		(unsigned)port_attr.lid, (unsigned)port_attr.sm_lid, port_attr.max_msg_sz);

	pd = ib_alloc_pd (dev);
	if (IS_ERR (pd)) {
		ret = PTR_ERR (pd);
		printk (KERN_INFO "pd allocation failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "PD allocated\n");

	mr = ib_get_dma_mr (pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR (mr)) {
		ret = PTR_ERR (mr);
		printk (KERN_INFO "get_dma_mr failed: %d\n", ret);
		return;
	}

	send_cq = ib_create_cq (dev, NULL, NULL, NULL, 1, 1);
	if (IS_ERR (send_cq)) {
		ret = PTR_ERR (send_cq);
		printk (KERN_INFO "ib_create_cq failed: %d\n", ret);
		return;
	}

	recv_cq = ib_create_cq (dev, verbs_comp_handler_recv, NULL, NULL, 1, 1);
	if (IS_ERR (recv_cq)) {
		ret = PTR_ERR (recv_cq);
		printk (KERN_INFO "ib_create_cq failed: %d\n", ret);
		return;
	}

	ib_req_notify_cq (recv_cq, IB_CQ_NEXT_COMP);
	printk (KERN_INFO "CQs allocated\n");

	ib_query_pkey (dev, 1, 0, &pkey);

	/* allocate memory */
	send_buf = kmalloc (buf_size + 40, GFP_KERNEL);
	recv_buf = kmalloc (buf_size + 40, GFP_KERNEL);

	if (!send_buf || !recv_buf) {
		printk (KERN_INFO "Memory allocation error\n");
		return;
	}

	printk (KERN_INFO "Trying to register regions\n");
	if (ib_dev->dma_ops)
		printk (KERN_INFO "DMA ops are defined\n");

	memset (send_buf, 0, buf_size+40);
	memset (recv_buf, 0, buf_size+40);

	send_key = ib_dma_map_single (ib_dev, send_buf, buf_size, DMA_FROM_DEVICE);
	printk (KERN_INFO "send_key obtained %llx\n", send_key);
	recv_key = ib_dma_map_single (ib_dev, recv_buf, buf_size, DMA_TO_DEVICE);
	printk (KERN_INFO "recv_key obtained %llx\n", recv_key);

	if (ib_dma_mapping_error (ib_dev, send_key)) {
		printk (KERN_INFO "Error mapping send buffer\n");
		return;
	}

	if (ib_dma_mapping_error (ib_dev, recv_key)) {
		printk (KERN_INFO "Error mapping recv buffer\n");
		return;
	}

	memset (&attrs, 0, sizeof (attrs));
	attrs.qp_type = IB_QPT_UD;
	attrs.sq_sig_type = IB_SIGNAL_ALL_WR;
	attrs.event_handler = verbs_qp_event;
	attrs.cap.max_send_wr = CQ_SIZE;
	attrs.cap.max_recv_wr = CQ_SIZE;
	attrs.cap.max_send_sge = 1;
	attrs.cap.max_recv_sge = 1;
	attrs.send_cq = send_cq;
	attrs.recv_cq = recv_cq;

	qp = ib_create_qp (pd, &attrs);
	if (IS_ERR (qp)) {
		ret = PTR_ERR (qp);
		printk (KERN_INFO "qp allocation failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "Create QP with num %x\n", qp->qp_num);

	if (init_qp (qp)) {
		printk (KERN_INFO "Failed to initialize QP\n");
		return;
	}

	ret = ib_query_gid (ib_dev, 1, 0, &local_info.gid);
	if (ret) {
		printk (KERN_INFO "query_gid failed %d\n", ret);
		return;
	}

	local_info.qp_num = qp->qp_num;
	local_info.lid = port_attr.lid;

	/* now we are ready to send our QP number and other stuff to other party */
	if (!server_addr) {
		schedule_work (&sock_accept);
		flush_scheduled_work ();
	}
	else
		exchange_info (server_addr);

	if (!have_remote_info) {
		printk (KERN_INFO "Have no remote info, give up\n");
		return;
	}

	ret = path_rec_lookup_start ();
	if (ret) {
		printk (KERN_INFO "path_rec lookup start failed: %d\n", ret);
		return;
	}

	/* post receive request */
	verbs_post_recv_req ();

	mod_timer (&verbs_timer, NEXTJIFF(1));
}
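In the example above, the schedule_work(&sock_accept)/flush_scheduled_work() pair is effectively a "run this in process context and wait for it" idiom. A minimal sketch of the same idiom that waits only for the one item rather than draining the whole scheduled-work queue (sock_accept_work and sock_accept_fn are illustrative names, not taken from the module above):

#include <linux/workqueue.h>

/* Hedged sketch: queue one item and wait for just that item. */
static void sock_accept_fn(struct work_struct *work)
{
	/* accept the incoming connection and record the peer's QP info */
}
static DECLARE_WORK(sock_accept_work, sock_accept_fn);

static void wait_for_peer_info(void)
{
	schedule_work(&sock_accept_work);
	flush_work(&sock_accept_work);	/* returns once sock_accept_fn() has run */
}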
Example #2
int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr)
{
	struct skt_dev_info *sinfo;
	int ret, i;

	sinfo = kmalloc(sizeof(struct skt_dev_info), GFP_KERNEL);
	if (!sinfo) {
		ret = -ENOMEM;
		goto out;
	}

	memset(sinfo, 0, sizeof(struct skt_dev_info));
	sinfo->nskt = nr;

	/*
	 * Initialise the per-socket structure.
	 */
	for (i = 0; i < nr; i++) {
		struct au1000_pcmcia_socket *skt = PCMCIA_SOCKET(i);
		memset(skt, 0, sizeof(*skt));

		skt->socket.resource_ops = &pccard_static_ops;
		skt->socket.ops = &au1x00_pcmcia_operations;
		skt->socket.owner = ops->owner;
		skt->socket.dev.dev = dev;

		init_timer(&skt->poll_timer);
		skt->poll_timer.function = au1x00_pcmcia_poll_event;
		skt->poll_timer.data = (unsigned long)skt;
		skt->poll_timer.expires = jiffies + AU1000_PCMCIA_POLL_PERIOD;

		skt->nr		= first + i;
		skt->irq	= 255;
		skt->dev	= dev;
		skt->ops	= ops;

		skt->res_skt.name	= skt_names[skt->nr];
		skt->res_io.name	= "io";
		skt->res_io.flags	= IORESOURCE_MEM | IORESOURCE_BUSY;
		skt->res_mem.name	= "memory";
		skt->res_mem.flags	= IORESOURCE_MEM;
		skt->res_attr.name	= "attribute";
		skt->res_attr.flags	= IORESOURCE_MEM;

		/*
		 * PCMCIA client drivers use the inb/outb macros to access the
		 * IO registers. Since mips_io_port_base is added to the
		 * access address of the mips implementation of inb/outb,
		 * we need to subtract it here because we want to access the
		 * I/O or MEM address directly, without going through this
		 * "mips_io_port_base" mechanism.
		 */
		if (i == 0) {
			skt->virt_io = (void *)
				(ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) -
				(u32)mips_io_port_base);
			skt->phys_attr = AU1X_SOCK0_PSEUDO_PHYS_ATTR;
			skt->phys_mem = AU1X_SOCK0_PSEUDO_PHYS_MEM;
		}
#ifndef CONFIG_MIPS_XXS1500
		else  {
			skt->virt_io = (void *)
				(ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) -
				(u32)mips_io_port_base);
			skt->phys_attr = AU1X_SOCK1_PSEUDO_PHYS_ATTR;
			skt->phys_mem = AU1X_SOCK1_PSEUDO_PHYS_MEM;
		}
#endif
		pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io;
		ret = ops->hw_init(skt);

		skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
		skt->socket.irq_mask = 0;
		skt->socket.map_size = MAP_SIZE;
		skt->socket.pci_irq = skt->irq;
		skt->socket.io_offset = (unsigned long)skt->virt_io;

		skt->status = au1x00_pcmcia_skt_state(skt);

		ret = pcmcia_register_socket(&skt->socket);
		if (ret)
			goto out_err;

		WARN_ON(skt->socket.sock != i);

		add_timer(&skt->poll_timer);
	}

	dev_set_drvdata(dev, sinfo);
	return 0;

	do {
		struct au1000_pcmcia_socket *skt = PCMCIA_SOCKET(i);

		del_timer_sync(&skt->poll_timer);
		pcmcia_unregister_socket(&skt->socket);
out_err:
		flush_scheduled_work();
		ops->hw_shutdown(skt);

		i--;
	} while (i > 0);
	kfree(sinfo);
out:
	return ret;
}
Example #3
static int __devinit tosa_bat_probe(struct platform_device *dev)
{
	int ret;
	int i;

	if (!machine_is_tosa())
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(gpios); i++) {
		ret = gpio_request(gpios[i].gpio, gpios[i].name);
		if (ret) {
			i--;
			goto err_gpio;
		}

		if (gpios[i].output)
			ret = gpio_direction_output(gpios[i].gpio,
					gpios[i].value);
		else
			ret = gpio_direction_input(gpios[i].gpio);

		if (ret)
			goto err_gpio;
	}

	mutex_init(&tosa_bat_main.work_lock);
	mutex_init(&tosa_bat_jacket.work_lock);

	INIT_WORK(&bat_work, tosa_bat_work);

	ret = power_supply_register(&dev->dev, &tosa_bat_main.psy);
	if (ret)
		goto err_psy_reg_main;
	ret = power_supply_register(&dev->dev, &tosa_bat_jacket.psy);
	if (ret)
		goto err_psy_reg_jacket;
	ret = power_supply_register(&dev->dev, &tosa_bat_bu.psy);
	if (ret)
		goto err_psy_reg_bu;

	ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG),
				tosa_bat_gpio_isr,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"main full", &tosa_bat_main);
	if (ret)
		goto err_req_main;

	ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG),
				tosa_bat_gpio_isr,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"jacket full", &tosa_bat_jacket);
	if (ret)
		goto err_req_jacket;

	ret = request_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT),
				tosa_bat_gpio_isr,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"jacket detect", &tosa_bat_jacket);
	if (!ret) {
		schedule_work(&bat_work);
		return 0;
	}

	free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
err_req_jacket:
	free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
err_req_main:
	power_supply_unregister(&tosa_bat_bu.psy);
err_psy_reg_bu:
	power_supply_unregister(&tosa_bat_jacket.psy);
err_psy_reg_jacket:
	power_supply_unregister(&tosa_bat_main.psy);
err_psy_reg_main:

	flush_scheduled_work();

	i--;
err_gpio:
	for (; i >= 0; i--)
		gpio_free(gpios[i].gpio);

	return ret;
}
Example #4
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
	int retval;
	struct tty_ldisc *o_ldisc, *new_ldisc;
	int work, o_work = 0;
	struct tty_struct *o_tty;

	new_ldisc = tty_ldisc_get(ldisc);
	if (IS_ERR(new_ldisc))
		return PTR_ERR(new_ldisc);

	/*
	 *	We need to look at the tty locking here for pty/tty pairs
	 *	when both sides try to change in parallel.
	 */

	o_tty = tty->link;	/* o_tty is the pty side or NULL */


	/*
	 *	Check the no-op case
	 */

	if (tty->ldisc->ops->num == ldisc) {
		tty_ldisc_put(new_ldisc);
		return 0;
	}

	/*
	 *	Problem: What do we do if this blocks ?
	 *	We could deadlock here
	 */

	tty_wait_until_sent(tty, 0);

	mutex_lock(&tty->ldisc_mutex);

	/*
	 *	We could be midstream of another ldisc change which has
	 *	dropped the lock during processing. If so we need to wait.
	 */

	while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
		mutex_unlock(&tty->ldisc_mutex);
		wait_event(tty_ldisc_wait,
			test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
		mutex_lock(&tty->ldisc_mutex);
	}
	set_bit(TTY_LDISC_CHANGING, &tty->flags);

	/*
	 *	No more input please, we are switching. The new ldisc
	 *	will update this value in the ldisc open function
	 */

	tty->receive_room = 0;

	o_ldisc = tty->ldisc;
	/*
	 *	Make sure we don't change while someone holds a
	 *	reference to the line discipline. The TTY_LDISC bit
	 *	prevents anyone taking a reference once it is clear.
	 *	We need the lock to avoid racing reference takers.
	 *
	 *	We must clear the TTY_LDISC bit here to avoid a livelock
	 *	with a userspace app continually trying to use the tty in
	 *	parallel to the change and re-referencing the tty.
	 */

	work = tty_ldisc_halt(tty);
	if (o_tty)
		o_work = tty_ldisc_halt(o_tty);

	/*
	 * Wait for ->hangup_work and ->buf.work handlers to terminate.
	 * We must drop the mutex here in case a hangup is also in process.
	 */

	mutex_unlock(&tty->ldisc_mutex);

	flush_scheduled_work();

	mutex_lock(&tty->ldisc_mutex);
	if (test_bit(TTY_HUPPED, &tty->flags)) {
		/* We were raced by the hangup method. It will have stomped
		   the ldisc data and closed the ldisc down */
		clear_bit(TTY_LDISC_CHANGING, &tty->flags);
		mutex_unlock(&tty->ldisc_mutex);
		tty_ldisc_put(new_ldisc);
		return -EIO;
	}

	/* Shutdown the current discipline. */
	tty_ldisc_close(tty, o_ldisc);

	/* Now set up the new line discipline. */
	tty_ldisc_assign(tty, new_ldisc);
	tty_set_termios_ldisc(tty, ldisc);

	retval = tty_ldisc_open(tty, new_ldisc);
	if (retval < 0) {
		/* Back to the old one or N_TTY if we can't */
		tty_ldisc_put(new_ldisc);
		tty_ldisc_restore(tty, o_ldisc);
	}

	/* At this point we hold a reference to the new ldisc and a
	   reference to the old ldisc. If we ended up flipping back
	   to the existing ldisc we have two references to it */

	if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
		tty->ops->set_ldisc(tty);

	tty_ldisc_put(o_ldisc);

	/*
	 *	Allow ldisc referencing to occur again
	 */

	tty_ldisc_enable(tty);
	if (o_tty)
		tty_ldisc_enable(o_tty);

	/* Restart the work queue in case no characters kick it off. Safe if
	   already running */
	if (work)
		schedule_delayed_work(&tty->buf.work, 1);
	if (o_work)
		schedule_delayed_work(&o_tty->buf.work, 1);
	mutex_unlock(&tty->ldisc_mutex);
	return retval;
}
int
ieee80211softmac_wx_set_essid(struct net_device *net_dev,
			      struct iw_request_info *info,
			      union iwreq_data *data,
			      char *extra)
{
	struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
	struct ieee80211softmac_network *n;
	struct ieee80211softmac_auth_queue_item *authptr;
	int length = 0;

	mutex_lock(&sm->associnfo.mutex);

	/* Check if we're already associating to this or another network
	 * If it's another network, cancel and start over with our new network
	 * If it's our network, ignore the change, we're already doing it!
	 */
	if((sm->associnfo.associating || sm->associnfo.associated) &&
	   (data->essid.flags && data->essid.length)) {
		/* Get the associating network */
		n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
		if(n && n->essid.len == data->essid.length &&
		   !memcmp(n->essid.data, extra, n->essid.len)) {
			dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
				MAC_ARG(sm->associnfo.bssid));
			goto out;
		} else {
			dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
			/* Cancel assoc work */
			cancel_delayed_work(&sm->associnfo.work);
			/* We don't have to do this, but it's a little cleaner */
			list_for_each_entry(authptr, &sm->auth_queue, list)
				cancel_delayed_work(&authptr->work);
			sm->associnfo.bssvalid = 0;
			sm->associnfo.bssfixed = 0;
			flush_scheduled_work();
			sm->associnfo.associating = 0;
			sm->associnfo.associated = 0;
		}
	}


	sm->associnfo.static_essid = 0;
	sm->associnfo.assoc_wait = 0;

	if (data->essid.flags && data->essid.length) {
		length = min((int)data->essid.length, IW_ESSID_MAX_SIZE);
		if (length) {
			memcpy(sm->associnfo.req_essid.data, extra, length);
			sm->associnfo.static_essid = 1;
		}
	}

	/* set our requested ESSID length.
	 * If applicable, we have already copied the data in */
	sm->associnfo.req_essid.len = length;

	sm->associnfo.associating = 1;
	/* queue lower level code to do work (if necessary) */
	schedule_delayed_work(&sm->associnfo.work, 0);
out:
	mutex_unlock(&sm->associnfo.mutex);

	return 0;
}
Example #6
static int akita_ioexp_suspend(struct platform_device *pdev, pm_message_t state)
{
	flush_scheduled_work();
	return 0;
}
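flush_scheduled_work() in a suspend handler, as above, waits for every item on the system workqueue, including work queued by unrelated drivers. A minimal sketch of a narrower variant, assuming the driver owns a single work item (akita_work, akita_do_work and the *_sketch function names are illustrative, not from the original driver):

#include <linux/platform_device.h>
#include <linux/workqueue.h>

static struct work_struct akita_work;		/* hypothetical work item */

static void akita_do_work(struct work_struct *work)
{
	/* talk to the I/O expander in process context */
}

static int akita_ioexp_probe_sketch(struct platform_device *pdev)
{
	INIT_WORK(&akita_work, akita_do_work);
	schedule_work(&akita_work);
	return 0;
}

static int akita_ioexp_suspend_sketch(struct platform_device *pdev,
				      pm_message_t state)
{
	flush_work(&akita_work);	/* waits only for akita_do_work() */
	return 0;
}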
Example #7
static ssize_t bq_proc_write(struct file *filp,
	const char *buff, unsigned long len, void *data)
{
    struct bq27541_info *di=data;
    u32 reg_val,value;
    int event  = USB_EVENT_VBUS;
    char messages[256], vol[256];

    if (len > 256)
    	len = 256;

    if (copy_from_user(messages, buff, len))
    	return -EFAULT;
    
    if ('-' == messages[0]) {
    	/* set the register index */
    	//memcpy(vol, messages+1, len-1);
    	index = (int) simple_strtoul(messages+1, NULL, 10);
        fake_temp=index*10;
    	printk(KERN_INFO"%s:fake_temp %d\n",__func__, fake_temp);
    }else if('+'==messages[0]) {
    	// set the register value 
    	//memcpy(vol, messages+1, len-1);
    	reg_val = (int)simple_strtoul(messages+1, NULL, 10);
        fake_full_available_capacity=reg_val*100;
        printk(KERN_INFO"%s:fake_full_available_capacity %d\n",__func__, fake_full_available_capacity);
    	//you can read the value from the hardware register here
    	//i2c_smbus_write_byte_data(di->bat_client, index, reg_val);
    	//printk(KERN_INFO"%s:register 0x%x: set value 0x%x\n",__func__, index, reg_val);
    }else if ('!' == messages[0]) {
        switch(messages[1]){
            case 'a':
                cancel_delayed_work(&di->bat_monitor_work);
                flush_scheduled_work();
            break;
            case 'b':
                queue_delayed_work(bat_work_queue,&di->bat_monitor_work,msecs_to_jiffies(1000 * 1));
            break;
            case 'c':   
                di->capacity=0;
                power_supply_changed(&di->bat);
            break;
            case 'd':
                blocking_notifier_call_chain(&notifier_list, EVENT_UNKNOW_BATTERY, NULL);
            break;
            case 'e':
                blocking_notifier_call_chain(&notifier_list, EVENT_RECOGNIZE_BATTERY, NULL);
            break;
            case 'f':
                blocking_notifier_call_chain(&notifier_list, EVENT_NOT_RECOGNIZE_BATTERY, NULL);
            break;
            case 'g':
                blocking_notifier_call_chain(&notifier_list, EVENT_WEAK_BATTERY, NULL);
            break;
            case 'h':
                blocking_notifier_call_chain(&notifier_list, EVENT_BATTERY_I2C_ERROR, NULL);
            break;
            case 'i':
                blocking_notifier_call_chain(&notifier_list, EVENT_FULL_BATTERY, NULL);
            break;
            case 'j':
                blocking_notifier_call_chain(&notifier_list, EVENT_RECHARGE_BATTERY, NULL);
            break;
            case 'k':
                blocking_notifier_call_chain(&notifier_list, EVENT_BATTERY_NTC_ZERO, NULL);
            break;
            case 'l':
                blocking_notifier_call_chain(&notifier_list, EVENT_BATTERY_NTC_NORMAL, NULL);
            break;
            case '1'://temp < -20
                fake_temp=-200;
                //blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_1, NULL);                
            break;
            case '2'://-20 < temp < 0 , Stop Charge
                fake_temp=-250;
                //blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_2, NULL);                  
            break;
            case '3'://0 < temp < 14 , 0.18C (max) to 4.0V
                fake_temp=-100;
                //blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_3, NULL);  
            break;
            case '4'://8 < temp < 14 , 0.18C (max) to 4.2V
                blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_4, NULL);                
            break;
            case '5'://14 < temp < 23 , 0.5C (max) to 4.2V
                blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_5, NULL);                  
            break;
            case '6'://23 < temp < 45 , 0.7C (max) to 4.2V
                blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_6, NULL);  
            break;
            case '7'://45 < temp < 60 , 0.5C (max) to 4V
                blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_7, NULL);  
            break;
            case '8'://60 > temp , Stop Charge and Shutdown
                blocking_notifier_call_chain(&notifier_list, EVENT_TEMP_PROTECT_STEP_8, NULL);  
            break;
    	}
    }
    return len;
}
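Note that case 'a' above cancels di->bat_monitor_work and then calls flush_scheduled_work(), while case 'b' re-queues that work onto the private bat_work_queue; flushing the global scheduled-work queue does not wait for items queued on a separate workqueue. A minimal sketch of keeping the monitor entirely on its own workqueue and tearing it down with the _sync cancel (bat_wq, bat_monitor_fn and the start/stop helpers are illustrative names):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *bat_wq;
static struct delayed_work bat_monitor_work;

static void bat_monitor_fn(struct work_struct *work)
{
	/* poll the gauge, then re-arm the monitor */
	queue_delayed_work(bat_wq, &bat_monitor_work, msecs_to_jiffies(1000));
}

static int bat_monitor_start(void)
{
	bat_wq = create_singlethread_workqueue("bat_monitor");
	if (!bat_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&bat_monitor_work, bat_monitor_fn);
	queue_delayed_work(bat_wq, &bat_monitor_work, msecs_to_jiffies(1000));
	return 0;
}

static void bat_monitor_stop(void)
{
	/* waits for a running callback and blocks self-rearming */
	cancel_delayed_work_sync(&bat_monitor_work);
	destroy_workqueue(bat_wq);
}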
Example #8
static
void __exit i2400ms_driver_exit(void)
{
	flush_scheduled_work();	/* for the stuff we schedule */
	sdio_unregister_driver(&i2400m_sdio_driver);
}
Example #9
int netback_accel_probe(struct xenbus_device *dev)
{
	struct netback_accel *bend;
	struct backend_info *binfo;
	int err;

	DPRINTK("%s: passed device %s\n", __FUNCTION__, dev->nodename);

	/* Allocate structure to store all our state... */
	bend = kzalloc(sizeof(struct netback_accel), GFP_KERNEL);
	if (bend == NULL) {
		DPRINTK("%s: no memory for bend\n", __FUNCTION__);
		return -ENOMEM;
	}
	
	mutex_init(&bend->bend_mutex);

	mutex_lock(&bend->bend_mutex);

	/* ...and store it where we can get at it */
	binfo = (struct backend_info *) dev->dev.driver_data;
	binfo->netback_accel_priv = bend;
	/* And vice-versa */
	bend->hdev_data = dev;

	DPRINTK("%s: Adding bend %p to list\n", __FUNCTION__, bend);
	
	init_waitqueue_head(&bend->state_wait_queue);
	bend->vnic_is_setup = 0;
	bend->frontend_state = XenbusStateUnknown;
	bend->backend_state = XenbusStateClosed;
	bend->removing = 0;

	sscanf(dev->nodename, NODENAME_PATH_FMT, &bend->far_end, 
	       &bend->vif_num);

	err = read_nicname(dev, bend);
	if (err) {
		/*
		 * Technically not an error, just means we're not 
		 * supposed to accelerate this
		 */
		DPRINTK("failed to get device name\n");
		goto fail_nicname;
	}

	/*
	 * Look up the device name in the list of NICs provided by
	 * driverlink to get the hardware type.
	 */
	err = netback_accel_sf_hwtype(bend);
	if (err) {
		/*
		 * Technically not an error, just means we're not
		 * supposed to accelerate this, probably belongs to
		 * some other backend
		 */
		DPRINTK("failed to match device name\n");
		goto fail_init_type;
	}

	err = publish_frontend_name(dev);
	if (err)
		goto fail_publish;

	err = netback_accel_debugfs_create(bend);
	if (err)
		goto fail_debugfs;
	
	mutex_unlock(&bend->bend_mutex);

	err = setup_config_accel_watch(dev, bend);
	if (err)
		goto fail_config_watch;

	err = setup_domu_accel_watch(dev, bend);
	if (err)
		goto fail_domu_watch;

	/*
	 * Indicate to the other end that we're ready to start unless
	 * the watch has already fired.
	 */
	mutex_lock(&bend->bend_mutex);
	if (bend->backend_state == XenbusStateClosed) {
		bend->backend_state = XenbusStateInitialising;
		net_accel_update_state(dev, XenbusStateInitialising);
	}
	mutex_unlock(&bend->bend_mutex);

	mutex_lock(&bend_list_mutex);
	link_bend(bend);
	mutex_unlock(&bend_list_mutex);

	return 0;

fail_domu_watch:

	unregister_xenbus_watch(&bend->config_accel_watch);
	kfree(bend->config_accel_watch.node);
fail_config_watch:

	/*
	 * Flush the scheduled work queue before freeing bend to get
	 * rid of any pending netback_accel_msg_rx_handler()
	 */
	flush_scheduled_work();

	mutex_lock(&bend->bend_mutex);
	net_accel_update_state(dev, XenbusStateUnknown);
	netback_accel_debugfs_remove(bend);
fail_debugfs:

	unpublish_frontend_name(dev);
fail_publish:

	/* No need to reverse netback_accel_sf_hwtype. */
fail_init_type:

	kfree(bend->nicname);
fail_nicname:
	binfo->netback_accel_priv = NULL;
	mutex_unlock(&bend->bend_mutex);
	kfree(bend);
	return err;
}
Example #10
void o2quo_exit(void)
{
	flush_scheduled_work();
}
Example #11
File: aoedev.c  Project: BillTheBest/aoe
static int
flush(const char __user *str, size_t cnt, int exiting)
{
	ulong flags;
	struct aoedev *d, **dd;
	char buf[16];
	int all = 0;
	int specified = 0;	/* flush a specific device */
	unsigned int skipflags;

	skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;

	if (!exiting && cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
		if (!all)
			specified = 1;
	}

	flush_scheduled_work();
	/* pass one: without sleeping, do aoedev_downdev */
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (exiting) {
			/* unconditionally take each device down */
		} else if (specified) {
			if (!user_req(buf, cnt, d))
				goto cont;
		} else if ((!all && (d->flags & DEVFL_UP))
		|| d->flags & skipflags
		|| d->nopen
		|| d->ref)
			goto cont;

		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
cont:
		spin_unlock(&d->lock);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* pass two: call freedev, which might sleep,
	 * for aoedevs marked with DEVFL_TKILL
	 */
restart:
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (d->flags & DEVFL_TKILL
		&& !(d->flags & DEVFL_FREEING)) {
			spin_unlock(&d->lock);
			spin_unlock_irqrestore(&devlist_lock, flags);
			freedev(d);
			goto restart;
		}
		spin_unlock(&d->lock);
	}

	/* pass three: remove aoedevs marked with DEVFL_FREED */
	for (dd = &devlist, d = *dd; d; d = *dd) {
		struct aoedev *doomed = NULL;

		spin_lock(&d->lock);
		if (d->flags & DEVFL_FREED) {
			*dd = d->next;
			doomed = d;
		} else {
			dd = &d->next;
		}
		spin_unlock(&d->lock);
		if (doomed)
			kfree(doomed->targets);
		kfree(doomed);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	return 0;
}
void cifs_dfs_release_automount_timer(void)
{
	BUG_ON(!list_empty(&cifs_dfs_automount_list));
	cancel_delayed_work(&cifs_dfs_automount_task);
	flush_scheduled_work();
}
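cancel_delayed_work() above only removes an item that has not started running yet; a callback that is already executing keeps going, which is why the code follows it with flush_scheduled_work(). A minimal sketch of the combined alternative, cancel_delayed_work_sync(), which removes a pending item and waits for a running one (my_task and my_task_fn are illustrative names, not the cifs symbols):

#include <linux/workqueue.h>

static void my_task_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_task, my_task_fn);

static void my_task_fn(struct work_struct *work)
{
	/* periodic housekeeping */
}

static void my_release_automount_timer(void)
{
	cancel_delayed_work_sync(&my_task);	/* cancel and wait in one call */
}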
Example #13
void
nfs4_renewd_prepare_shutdown(struct nfs_server *server)
{
	flush_scheduled_work();
}
Example #14
static int wm97xx_bat_suspend(struct device *dev)
{
	flush_scheduled_work();
	return 0;
}
Example #15
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;


	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #16
int netback_accel_remove(struct xenbus_device *dev)
{
	struct backend_info *binfo;
	struct netback_accel *bend; 
	int frontend_state;

	binfo = (struct backend_info *) dev->dev.driver_data;
	bend = (struct netback_accel *) binfo->netback_accel_priv;

	DPRINTK("%s: dev %p bend %p\n", __FUNCTION__, dev, bend);
	
	BUG_ON(bend == NULL);
	
	mutex_lock(&bend_list_mutex);
	unlink_bend(bend);
	mutex_unlock(&bend_list_mutex);

	mutex_lock(&bend->bend_mutex);

	/* Reject any requests to connect. */
	bend->removing = 1;

	/*
	 * Switch to closing to tell the other end that we're going
	 * away.
	 */
	if (bend->backend_state != XenbusStateClosing) {
		bend->backend_state = XenbusStateClosing;
		net_accel_update_state(dev, XenbusStateClosing);
	}

	frontend_state = (int)XenbusStateUnknown;
	xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
		     &frontend_state);

	mutex_unlock(&bend->bend_mutex);

	/*
	 * Wait until this end goes to the closed state.  This happens
	 * in response to the other end going to the closed state.
	 * Don't bother doing this if the other end is already closed
	 * because if it is then there is nothing to do.
	 */
	if (frontend_state != (int)XenbusStateClosed &&
	    frontend_state != (int)XenbusStateUnknown)
		wait_event(bend->state_wait_queue,
			   bend->backend_state == XenbusStateClosed);

	unregister_xenbus_watch(&bend->domu_accel_watch);
	kfree(bend->domu_accel_watch.node);

	unregister_xenbus_watch(&bend->config_accel_watch);
	kfree(bend->config_accel_watch.node);

	/*
	 * Flush the scheduled work queue before freeing bend to get
	 * rid of any pending netback_accel_msg_rx_handler()
	 */
	flush_scheduled_work();

	mutex_lock(&bend->bend_mutex);

	/* Tear down the vnic if it was set up. */
	if (bend->vnic_is_setup) {
		bend->vnic_is_setup = 0;
		cleanup_vnic(bend);
	}

	bend->backend_state = XenbusStateUnknown;
	net_accel_update_state(dev, XenbusStateUnknown);

	netback_accel_debugfs_remove(bend);

	unpublish_frontend_name(dev);

	kfree(bend->nicname);

	binfo->netback_accel_priv = NULL;

	mutex_unlock(&bend->bend_mutex);

	kfree(bend);

	return 0;
}
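The flush above exists so that no queued netback_accel_msg_rx_handler() can still reference bend once it has been freed. When the work item is embedded in the object being freed, cancelling that one item just before kfree() gives the same use-after-free guarantee without draining the whole queue; a minimal sketch (struct foo and foo_release are illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct rx_work;
	/* ... per-device state used by the work handler ... */
};

static void foo_release(struct foo *f)
{
	cancel_work_sync(&f->rx_work);	/* rx_work cannot run after this */
	kfree(f);
}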
void mali_core_scaling_term(void)
{
	flush_scheduled_work();
}
Example #18
static
void __exit i2400mu_driver_exit(void)
{
	flush_scheduled_work();	
	usb_deregister(&i2400mu_driver);
}
Example #19
static int menelaus_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	struct menelaus_chip	*menelaus;
	int			rev = 0, val;
	int			err = 0;
	struct menelaus_platform_data *menelaus_pdata =
					client->dev.platform_data;

	if (the_menelaus) {
		dev_dbg(&client->dev, "only one %s for now\n",
				DRIVER_NAME);
		return -ENODEV;
	}

	menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
	if (!menelaus)
		return -ENOMEM;

	i2c_set_clientdata(client, menelaus);

	the_menelaus = menelaus;
	menelaus->client = client;

	/* If this is a true probe, check that the device is present */
	rev = menelaus_read_reg(MENELAUS_REV);
	if (rev < 0) {
		dev_err(&client->dev, "device not found");
		err = -ENODEV;
		goto fail1;
	}

	/* Ack and disable all Menelaus interrupts */
	menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
	menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
	menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
	menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
	menelaus->mask1 = 0xff;
	menelaus->mask2 = 0xff;

	/* Set output buffer strengths */
	menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);

	if (client->irq > 0) {
		err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
				  DRIVER_NAME, menelaus);
		if (err) {
			dev_dbg(&client->dev,  "can't get IRQ %d, err %d",
					client->irq, err);
			goto fail1;
		}
	}

	mutex_init(&menelaus->lock);
	INIT_WORK(&menelaus->work, menelaus_work);

	dev_info(&client->dev, "Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);

	val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
	if (val < 0)
		goto fail2;
	if (val & (1 << 7))
		menelaus->vcore_hw_mode = 1;
	else
		menelaus->vcore_hw_mode = 0;

	if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
		err = menelaus_pdata->late_init(&client->dev);
		if (err < 0)
			goto fail2;
	}

	menelaus_rtc_init(menelaus);

	return 0;
fail2:
	free_irq(client->irq, menelaus);
	flush_scheduled_work();
fail1:
	kfree(menelaus);
	return err;
}
Example #20
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);
	if (mei_device) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "mei: Failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, mei_driver_name);
	if (err) {
		printk(KERN_ERR "mei: Failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = init_mei_device(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping  IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	/* request and enable interrupt   */
	err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, mei_driver_name, dev);
	if (err) {
		printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto unmap_memory;
	}
	INIT_DELAYED_WORK(&dev->wd_work, mei_wd_timer);
	if (mei_hw_init(dev)) {
		printk(KERN_ERR "mei: Init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}
	mei_device = pdev;
	pci_set_drvdata(pdev, dev);
	schedule_delayed_work(&dev->wd_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("mei: Driver initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
unmap_memory:
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	printk(KERN_ERR "mei: Driver initialization failed.\n");
	return err;
}
Example #21
/*
========================================================================
Routine Description:
    Release allocated resources.

Arguments:
    *dev				Pointer to the PCI or USB device
	pAd					driver control block pointer

Return Value:
    None

Note:
========================================================================
*/
static void rt2870_disconnect(struct usb_device *dev, VOID *pAd)
{
	struct net_device *net_dev;


	DBGPRINT(RT_DEBUG_ERROR, ("rtusb_disconnect: unregister usbnet usb-%s-%s\n",
				dev->bus->bus_name, dev->devpath));
	if (!pAd)
	{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)	/* kernel 2.4 series */
		while(MOD_IN_USE > 0)
		{
			MOD_DEC_USE_COUNT;
		}
#else
		usb_put_dev(dev);
#endif /* LINUX_VERSION_CODE */

		printk("rtusb_disconnect: pAd == NULL!\n");
		return;
	}
/*	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST); */
	RTMP_DRIVER_NIC_NOT_EXIST_SET(pAd);

	/* for debug, wait to show some messages to /proc system */
	udelay(1);


	RTMP_DRIVER_NET_DEV_GET(pAd, &net_dev);

	RtmpPhyNetDevExit(pAd, net_dev);

	/* FIXME: do we really need the following delay and the flush of scheduled work? */
	udelay(1);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)	/* kernel 2.4 series */
#else
	flush_scheduled_work();
#endif /* LINUX_VERSION_CODE */
	udelay(1);

#ifdef CONFIG_HAS_EARLYSUSPEND
	RTMP_DRIVER_ADAPTER_UNREGISTER_EARLYSUSPEND(pAd);
#endif
#ifdef RT_CFG80211_SUPPORT
	RTMP_DRIVER_80211_UNREGISTER(pAd, net_dev);
#endif /* RT_CFG80211_SUPPORT */

	/* free the root net_device */
//	RtmpOSNetDevFree(net_dev);

	RtmpRaDevCtrlExit(pAd);

	/* free the root net_device */
	RtmpOSNetDevFree(net_dev);

	/* release a use of the usb device structure */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)	/* kernel 2.4 series */
	while(MOD_IN_USE > 0)
	{
		MOD_DEC_USE_COUNT;
	}
#else
	usb_put_dev(dev);
#endif /* LINUX_VERSION_CODE */
	udelay(1);

	DBGPRINT(RT_DEBUG_ERROR, (" RTUSB disconnect successfully\n"));
}
Example #22
static
void __exit i2400mu_driver_exit(void)
{
	flush_scheduled_work();	/* for the stuff we schedule from sysfs.c */
	usb_deregister(&i2400mu_driver);
}
Example #23
static void cx231xx_ir_stop(struct cx231xx_IR *ir)
{
	del_timer_sync(&ir->timer);
	flush_scheduled_work();
}
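The ordering above matters: del_timer_sync() guarantees the timer callback is no longer running and will not fire again, and the flush then waits for any work that callback managed to queue before it was stopped. A minimal sketch of the same two-stage teardown with a per-driver work item (ir_timer, ir_work and the ir_* helpers are illustrative names):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct timer_list ir_timer;
static struct work_struct ir_work;

static void ir_work_fn(struct work_struct *work)
{
	/* poll the IR hardware in process context */
}

static void ir_timer_fn(unsigned long data)
{
	schedule_work(&ir_work);
	mod_timer(&ir_timer, jiffies + msecs_to_jiffies(100));
}

static void ir_start(void)
{
	INIT_WORK(&ir_work, ir_work_fn);
	setup_timer(&ir_timer, ir_timer_fn, 0);
	mod_timer(&ir_timer, jiffies + msecs_to_jiffies(100));
}

static void ir_stop(void)
{
	del_timer_sync(&ir_timer);	/* timer callback can no longer queue work */
	cancel_work_sync(&ir_work);	/* remove or wait for already queued work */
}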
Example #24
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
	int retval;
	struct tty_ldisc o_ldisc, new_ldisc;
	int work;
	unsigned long flags;
	struct tty_struct *o_tty;

restart:
	/* This is a bit ugly for now but means we can break the 'ldisc
	   is part of the tty struct' assumption later */
	retval = tty_ldisc_get(ldisc, &new_ldisc);
	if (retval)
		return retval;

	/*
	 *	Problem: What do we do if this blocks ?
	 */

	tty_wait_until_sent(tty, 0);

	if (tty->ldisc.ops->num == ldisc) {
		tty_ldisc_put(new_ldisc.ops);
		return 0;
	}

	/*
	 *	No more input please, we are switching. The new ldisc
	 *	will update this value in the ldisc open function
	 */

	tty->receive_room = 0;

	o_ldisc = tty->ldisc;
	o_tty = tty->link;

	/*
	 *	Make sure we don't change while someone holds a
	 *	reference to the line discipline. The TTY_LDISC bit
	 *	prevents anyone taking a reference once it is clear.
	 *	We need the lock to avoid racing reference takers.
	 *
	 *	We must clear the TTY_LDISC bit here to avoid a livelock
	 *	with a userspace app continually trying to use the tty in
	 *	parallel to the change and re-referencing the tty.
	 */
	clear_bit(TTY_LDISC, &tty->flags);
	if (o_tty)
		clear_bit(TTY_LDISC, &o_tty->flags);

	spin_lock_irqsave(&tty_ldisc_lock, flags);
	if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
		if (tty->ldisc.refcount) {
			/* Free the new ldisc we grabbed. Must drop the lock
			   first. */
			spin_unlock_irqrestore(&tty_ldisc_lock, flags);
			tty_ldisc_put(o_ldisc.ops);
			/*
			 * There are several reasons we may be busy, including
			 * random momentary I/O traffic. We must therefore
			 * retry. We could distinguish between blocking ops
			 * and retries if we made tty_ldisc_wait() smarter.
			 * That is up for discussion.
			 */
			if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
				return -ERESTARTSYS;
			goto restart;
		}
		if (o_tty && o_tty->ldisc.refcount) {
			spin_unlock_irqrestore(&tty_ldisc_lock, flags);
			tty_ldisc_put(o_tty->ldisc.ops);
			if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
				return -ERESTARTSYS;
			goto restart;
		}
	}
	/*
	 *	If the TTY_LDISC bit is set, then we are racing against
	 *	another ldisc change
	 */
	if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
		struct tty_ldisc *ld;
		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
		tty_ldisc_put(new_ldisc.ops);
		ld = tty_ldisc_ref_wait(tty);
		tty_ldisc_deref(ld);
		goto restart;
	}
	/*
	 *	This flag is used to avoid two parallel ldisc changes. Once
	 *	open and close are fine grained locked this may work better
	 *	as a mutex shared with the open/close/hup paths
	 */
	set_bit(TTY_LDISC_CHANGING, &tty->flags);
	if (o_tty)
		set_bit(TTY_LDISC_CHANGING, &o_tty->flags);
	spin_unlock_irqrestore(&tty_ldisc_lock, flags);
	
	/*
	 *	From this point on we know nobody has an ldisc
	 *	usage reference, nor can they obtain one until
	 *	we say so later on.
	 */

	work = cancel_delayed_work(&tty->buf.work);
	/*
	 * Wait for ->hangup_work and ->buf.work handlers to terminate
	 * MUST NOT hold locks here.
	 */
	flush_scheduled_work();
	/* Shutdown the current discipline. */
	if (o_ldisc.ops->close)
		(o_ldisc.ops->close)(tty);

	/* Now set up the new line discipline. */
	tty_ldisc_assign(tty, &new_ldisc);
	tty_set_termios_ldisc(tty, ldisc);
	if (new_ldisc.ops->open)
		retval = (new_ldisc.ops->open)(tty);
	if (retval < 0) {
		tty_ldisc_put(new_ldisc.ops);
		tty_ldisc_restore(tty, &o_ldisc);
	}
	/* At this point we hold a reference to the new ldisc and a
	   reference to the old ldisc. If we ended up flipping back
	   to the existing ldisc we have two references to it */

	if (tty->ldisc.ops->num != o_ldisc.ops->num && tty->ops->set_ldisc)
		tty->ops->set_ldisc(tty);

	tty_ldisc_put(o_ldisc.ops);

	/*
	 *	Allow ldisc referencing to occur as soon as the driver
	 *	ldisc callback completes.
	 */

	tty_ldisc_enable(tty);
	if (o_tty)
		tty_ldisc_enable(o_tty);

	/* Restart it in case no characters kick it off. Safe if
	   already running */
	if (work)
		schedule_delayed_work(&tty->buf.work, 1);
	return retval;
}
int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
{
	int ret;

	init_timer(&skt->poll_timer);
	skt->poll_timer.function = soc_common_pcmcia_poll_event;
	skt->poll_timer.data = (unsigned long)skt;
	skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;

	ret = request_resource(&iomem_resource, &skt->res_skt);
	if (ret)
		goto out_err_1;

	ret = request_resource(&skt->res_skt, &skt->res_io);
	if (ret)
		goto out_err_2;

	ret = request_resource(&skt->res_skt, &skt->res_mem);
	if (ret)
		goto out_err_3;

	ret = request_resource(&skt->res_skt, &skt->res_attr);
	if (ret)
		goto out_err_4;

	skt->virt_io = ioremap(skt->res_io.start, 0x10000);
	if (skt->virt_io == NULL) {
		ret = -ENOMEM;
		goto out_err_5;
	}

	mutex_lock(&soc_pcmcia_sockets_lock);

	list_add(&skt->node, &soc_pcmcia_sockets);

	/*
	 * We initialize default socket timing here, because
	 * we are not guaranteed to see a SetIOMap operation at
	 * runtime.
	 */
	skt->ops->set_timing(skt);

	ret = skt->ops->hw_init(skt);
	if (ret)
		goto out_err_6;

	skt->socket.ops = &soc_common_pcmcia_operations;
	skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
	skt->socket.resource_ops = &pccard_static_ops;
	skt->socket.irq_mask = 0;
	skt->socket.map_size = PAGE_SIZE;
	skt->socket.io_offset = (unsigned long)skt->virt_io;

	skt->status = soc_common_pcmcia_skt_state(skt);

	ret = pcmcia_register_socket(&skt->socket);
	if (ret)
		goto out_err_7;

	add_timer(&skt->poll_timer);

	mutex_unlock(&soc_pcmcia_sockets_lock);

	ret = device_create_file(&skt->socket.dev, &dev_attr_status);
	if (ret)
		goto out_err_8;

	return ret;

 out_err_8:
	mutex_lock(&soc_pcmcia_sockets_lock);
	del_timer_sync(&skt->poll_timer);
	pcmcia_unregister_socket(&skt->socket);

 out_err_7:
	flush_scheduled_work();

	skt->ops->hw_shutdown(skt);
 out_err_6:
	list_del(&skt->node);
	mutex_unlock(&soc_pcmcia_sockets_lock);
	iounmap(skt->virt_io);
 out_err_5:
	release_resource(&skt->res_attr);
 out_err_4:
	release_resource(&skt->res_mem);
 out_err_3:
	release_resource(&skt->res_io);
 out_err_2:
	release_resource(&skt->res_skt);
 out_err_1:

	return ret;
}
Example #26
/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}
Example #27
static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
{
	flush_scheduled_work();
	return 0;
}
Example #28
void mali_core_scaling_term(void)
{
#ifndef CONFIG_MALI_DVFS
    flush_scheduled_work();
#endif
}
Example #29
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void kevent (void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->txq);
		status = usb_clear_halt (dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->rxq);
		status = usb_clear_halt (dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit (dev, urb, GFP_KERNEL);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static void tx_complete (struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete (struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
							urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		axusbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void axusbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			devdbg (dev, "bogus skb state %d", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !timer_pending (&dev->delay)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit (dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link (dev))
				devdbg (dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static
void axusbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	if (netif_msg_probe (dev))
		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work ();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	free_netdev(net);
	usb_put_dev (xdev);
}

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static int
axusbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk (KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK (&dev->kevent, kevent, dev);
#else
	INIT_WORK (&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
//	mutex_init (&dev->phy_mutex);

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
	net->open		= axusbnet_open,
	net->stop		= axusbnet_stop,
	net->hard_start_xmit	= axusbnet_start_xmit,
	net->tx_timeout	= axusbnet_tx_timeout,
	net->get_stats = axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	status = info->bind (dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	// start as if the link is up
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend (struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		(void) unlink_urbs (dev, &dev->rxq);
		(void) unlink_urbs (dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}

static int
axusbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule (&dev->bh);

	return 0;
}
static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
{
	flush_scheduled_work();
	return 0;
}