Example #1
static int
airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct orinoco_private *priv;
	struct airport *card;
	unsigned long phys_addr;
	hermes_t *hw;

	if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
		printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n");
		return -ENODEV;
	}

	/* Allocate space for private device-specific data */
	priv = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
				airport_hard_reset, NULL);
	if (!priv) {
		printk(KERN_ERR PFX "Cannot allocate network device\n");
		return -ENODEV;
	}
	card = priv->card;

	hw = &priv->hw;
	card->mdev = mdev;

	if (macio_request_resource(mdev, 0, DRIVER_NAME)) {
		printk(KERN_ERR PFX "can't request IO resource !\n");
		free_orinocodev(priv);
		return -EBUSY;
	}

	macio_set_drvdata(mdev, priv);

	/* Setup interrupts & base address */
	card->irq = macio_irq(mdev, 0);
	phys_addr = macio_resource_start(mdev, 0);  /* Physical address */
	printk(KERN_DEBUG PFX "Physical address %lx\n", phys_addr);
	card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN);
	if (!card->vaddr) {
		printk(KERN_ERR PFX "ioremap() failed\n");
		goto failed;
	}

	hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING);

	/* Power up card */
	pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
			  macio_get_of_node(mdev), 0, 1);
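	/* give the just-powered-up hardware a second to settle before resetting it */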
	ssleep(1);

	/* Reset it before we get the interrupt */
	hw->ops->init(hw);

	if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
		printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
		goto failed;
	}
	card->irq_requested = 1;

	/* Initialise the main driver */
	if (orinoco_init(priv) != 0) {
		printk(KERN_ERR PFX "orinoco_init() failed\n");
		goto failed;
	}

	/* Register an interface with the stack */
	if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
		goto failed;
	}
	card->ndev_registered = 1;
	return 0;
 failed:
	airport_detach(mdev);
	return -ENODEV;
}				/* airport_attach */
Example #2
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port address */
	
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){ 
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	} 

	/* give up in despair */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

 got_port:
	/* probe for the IRQ line */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* reset the interrupt line */
		inb_p(io+7);
		inb_p(io+7);
		/* trigger an interrupt (I hope) */
		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate a DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */

	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7); /* clear reset */
	inb_p(io+4); 
	inb_p(io+5);
	inb_p(io+5); /* enable dma */
	inb_p(io+6); /* tri-state interrupt line */

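	/* let the card settle for a second after the reset sequence above */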
	ssleep(1);
	
	/* now, figure out which dma channel we're using, unless it's
	   already been specified */
	/* well, 0 is a legal DMA channel, but the LTPC card doesn't
	   use it... */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {  /* no dma channel */
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d.  Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* the card will want to send a result at this point */
	/* (I think... leaving out this part makes the kernel crash,
           so I put it back in...) */

	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* grab it and don't let go :-) */
	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);  /* enable interrupts from board */
		(void) inb_p(io+7);  /* and reset irq line */
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;
		/* polled mode -- 20 times per second */
		/* this is really, really slow... should it poll more often? */
		init_timer(&ltpc_timer);
		ltpc_timer.function=ltpc_poll;
		ltpc_timer.data = (unsigned long) dev;

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
Example #3
/*
 * Linux interface functions
 */
static int
sb1000_open(struct net_device *dev)
{
	char *name;
	int ioaddr[2], status;
	struct sb1000_private *lp = netdev_priv(dev);
	const unsigned short FirmwareVersion[] = {0x01, 0x01};
	unsigned char version[2];	/* firmware version reported by the board */

	ioaddr[0] = dev->base_addr;
	/* mem_start holds the second I/O address */
	ioaddr[1] = dev->mem_start;
	name = dev->name;

	/* initialize sb1000 */
	if ((status = sb1000_reset(ioaddr, name)))
		return status;
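	/* give the board a second to settle after the reset before the CRC check */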
	ssleep(1);
	if ((status = sb1000_check_CRC(ioaddr, name)))
		return status;

	/* initialize private data before board can catch interrupts */
	lp->rx_skb[0] = NULL;
	lp->rx_skb[1] = NULL;
	lp->rx_skb[2] = NULL;
	lp->rx_skb[3] = NULL;
	lp->rx_dlen[0] = 0;
	lp->rx_dlen[1] = 0;
	lp->rx_dlen[2] = 0;
	lp->rx_dlen[3] = 0;
	lp->rx_frames = 0;
	lp->rx_error_count = 0;
	lp->rx_error_dpc_count = 0;
	lp->rx_session_id[0] = 0x50;
	lp->rx_session_id[1] = 0x48;
	lp->rx_session_id[2] = 0x44;
	lp->rx_session_id[3] = 0x42;
	lp->rx_frame_id[0] = 0;
	lp->rx_frame_id[1] = 0;
	lp->rx_frame_id[2] = 0;
	lp->rx_frame_id[3] = 0;
	if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
		return -EAGAIN;
	}

	if (sb1000_debug > 2)
		printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);

	/* Activate board and check firmware version */
	udelay(1000);
	if ((status = sb1000_activate(ioaddr, name)))
		return status;
	udelay(0);
	if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
		return status;
	if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
		printk(KERN_WARNING "%s: found firmware version %x.%02x "
			"(should be %x.%02x)\n", name, version[0], version[1],
			FirmwareVersion[0], FirmwareVersion[1]);


	netif_start_queue(dev);
	return 0;					/* Always succeed */
}
Example #4
/**
 * software_resume - Resume from a saved hibernation image.
 *
 * This routine is called as a late initcall, when all devices have been
 * discovered and initialized already.
 *
 * The image reading code is called to see if there is a hibernation image
 * available for reading.  If that is the case, devices are quiesced and the
 * contents of memory are restored from the saved image.
 *
 * If this is successful, control reappears in the restored target kernel in
 * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
 * attempts to recover gracefully and make the kernel return to the normal mode
 * of operation.
 */
static int software_resume(void)
{
	int error;
	unsigned int flags;

	/*
	 * If the user said "noresume", bail out early.
	 */
	if (noresume || !hibernation_available())
		return 0;

	/*
	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
	 * is configured into the kernel. Since the regular hibernate
	 * trigger path is via sysfs which takes a buffer mutex before
	 * calling hibernate functions (which take pm_mutex) this can
	 * cause lockdep to complain about a possible ABBA deadlock
	 * which cannot happen since we're in the boot code here and
	 * sysfs can't be invoked yet. Therefore, we use a subclass
	 * here to avoid lockdep complaining.
	 */
	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);

	if (swsusp_resume_device)
		goto Check_image;

	if (!strlen(resume_file)) {
		error = -ENOENT;
		goto Unlock;
	}

	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);

	if (resume_delay) {
		printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
			resume_delay);
		ssleep(resume_delay);
	}

	/* Check if the device is there */
	swsusp_resume_device = name_to_dev_t(resume_file);

	/*
	 * name_to_dev_t() cannot verify the partition when resume_file is in
	 * integer format (e.g. major:minor).
	 */
	if (isdigit(resume_file[0]) && resume_wait) {
		int partno;
		while (!get_gendisk(swsusp_resume_device, &partno))
			msleep(10);
	}

	if (!swsusp_resume_device) {
		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
		wait_for_device_probe();

		if (resume_wait) {
			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
				msleep(10);
			async_synchronize_full();
		}

		swsusp_resume_device = name_to_dev_t(resume_file);
		if (!swsusp_resume_device) {
			error = -ENODEV;
			goto Unlock;
		}
	}

 Check_image:
	pr_debug("PM: Hibernation image partition %d:%d present\n",
		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));

	pr_debug("PM: Looking for hibernation image.\n");
	error = swsusp_check();
	if (error)
		goto Unlock;

	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		goto Unlock;
	}

	pm_prepare_console();
	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
	if (error)
		goto Close_Finish;

	pr_debug("PM: Preparing processes for restore.\n");
	error = freeze_processes();
	if (error)
		goto Close_Finish;

	pr_debug("PM: Loading hibernation image.\n");

	lock_device_hotplug();
	error = create_basic_memory_bitmaps();
	if (error)
		goto Thaw;

	error = swsusp_read(&flags);
	swsusp_close(FMODE_READ);
	if (!error)
		hibernation_restore(flags & SF_PLATFORM_MODE);

	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
	swsusp_free();
	free_basic_memory_bitmaps();
 Thaw:
	unlock_device_hotplug();
	thaw_processes();
 Finish:
	pm_notifier_call_chain(PM_POST_RESTORE);
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
	/* For success case, the suspend path will release the lock */
 Unlock:
	mutex_unlock(&pm_mutex);
	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
	return error;
 Close_Finish:
	swsusp_close(FMODE_READ);
	goto Finish;
}
Example #5
int test_random_thread(void * arg)
{
    TZ_RESULT ret;
    KREE_SESSION_HANDLE ndbg_session;
    KREE_SESSION_HANDLE mem_session;
    KREE_SHAREDMEM_HANDLE shm_handle;
    KREE_SHAREDMEM_PARAM shm_param;
    MTEEC_PARAM param[4];
    uint32_t *ptr;
    int size = 32;

    ptr = kmalloc(size, GFP_KERNEL);
    if (!ptr)
        return 1; /* allocation failed */
    memset(ptr, 0, size);

    while(!kthread_should_stop())
    {
        ret = KREE_CreateSession(TZ_TA_NDBG_UUID, &ndbg_session);
        if (ret != TZ_RESULT_SUCCESS)
        {
            printk("CreateSession error %d\n", ret);
            return 1;
        }

        ret = KREE_CreateSession(TZ_TA_MEM_UUID, &mem_session);
        if (ret != TZ_RESULT_SUCCESS)
        {
            printk("Create memory session error %d\n", ret);
            return 1;
        }

        shm_param.buffer = ptr;
        shm_param.size = size;
        ret = KREE_RegisterSharedmem(mem_session, &shm_handle, &shm_param);
        if (ret != TZ_RESULT_SUCCESS)
        {
            printk("KREE_RegisterSharedmem Error: %s\n", TZ_GetErrorString(ret));
            return 1;
        }

        param[0].memref.handle = (uint32_t) shm_handle;
        param[0].memref.offset = 0;
        param[0].memref.size = size/4;
        param[1].value.a = size/4;

        ret = KREE_TeeServiceCall((KREE_SESSION_HANDLE)ndbg_session,
                TZCMD_NDBG_RANDOM,
                TZ_ParamTypes3(TZPT_MEMREF_INPUT, TZPT_VALUE_INPUT, TZPT_VALUE_OUTPUT), param);

        ret = KREE_UnregisterSharedmem(mem_session, shm_handle);
        if (ret != TZ_RESULT_SUCCESS)
        {
            printk("KREE_UnregisterSharedmem Error: %s\n", TZ_GetErrorString(ret));
            return 1;
        }

        ret = KREE_CloseSession(ndbg_session);
        if (ret != TZ_RESULT_SUCCESS)
            printk("CloseSession error %d\n", ret);

        ret = KREE_CloseSession(mem_session);
        if (ret != TZ_RESULT_SUCCESS)
            printk("Close memory session error %d\n", ret);

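        /* idle for a few seconds between iterations of the stress loop */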
        ssleep(5);
    }

    kfree(ptr);

    return 0;
}
Example #6
static int __init mount_nfs_root(void)
{
	char *root_dev, *root_data;
	unsigned int timeout;
	int try, err;

	err = nfs_root_data(&root_dev, &root_data);
	if (err != 0)
		return 0;

	/*
	 * The server or network may not be ready, so try several
	 * times.  Stop after a few tries in case the client wants
	 * to fall back to other boot methods.
	 */
	timeout = NFSROOT_TIMEOUT_MIN;
	for (try = 1; ; try++) {
		err = do_mount_root(root_dev, "nfs",
					root_mountflags, root_data);
		if (err == 0)
			return 1;
		if (try > NFSROOT_RETRY_MAX)
			break;

		/* Wait, in case the server refused us immediately */
		ssleep(timeout);
		timeout <<= 1;
		if (timeout > NFSROOT_TIMEOUT_MAX)
			timeout = NFSROOT_TIMEOUT_MAX;
	}
	return 0;
}
#endif

#if defined(CONFIG_BLK_DEV_RAM) || defined(CONFIG_BLK_DEV_FD)
void __init change_floppy(char *fmt, ...)
{
	struct termios termios;
	char buf[80];
	char c;
	int fd;
	va_list args;
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
	if (fd >= 0) {
		sys_ioctl(fd, FDEJECT, 0);
		sys_close(fd);
	}
	printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
	fd = sys_open("/dev/console", O_RDWR, 0);
	if (fd >= 0) {
		sys_ioctl(fd, TCGETS, (long)&termios);
		termios.c_lflag &= ~ICANON;
		sys_ioctl(fd, TCSETSF, (long)&termios);
		sys_read(fd, &c, 1);
		termios.c_lflag |= ICANON;
		sys_ioctl(fd, TCSETSF, (long)&termios);
		sys_close(fd);
	}
}
#endif

void __init mount_root(void)
{
#ifdef CONFIG_ROOT_NFS
	if (ROOT_DEV == Root_NFS) {
		if (mount_nfs_root())
			return;

		printk(KERN_ERR "VFS: Unable to mount root fs via NFS, trying floppy.\n");
		ROOT_DEV = Root_FD0;
	}
#endif
#ifdef CONFIG_BLK_DEV_FD
	if (MAJOR(ROOT_DEV) == FLOPPY_MAJOR) {
		/* rd_doload is 2 for a dual initrd/ramload setup */
		if (rd_doload==2) {
			if (rd_load_disk(1)) {
				ROOT_DEV = Root_RAM1;
				root_device_name = NULL;
			}
		} else
			change_floppy("root floppy");
	}
#endif
#ifdef CONFIG_BLOCK
	{
		int err = create_dev("/dev/root", ROOT_DEV);

		if (err < 0)
			pr_emerg("Failed to create /dev/root: %d\n", err);
		mount_block_root("/dev/root", root_mountflags);
	}
#endif
}

/*
 * Prepare the namespace - decide what/where to mount, load ramdisks, etc.
 */
void __init prepare_namespace(void)
{
	int is_floppy;

	if (root_delay) {
		printk(KERN_INFO "Waiting %d sec before mounting root device...\n",
		       root_delay);
		ssleep(root_delay);
	}

	/*
	 * wait for the known devices to complete their probing
	 *
	 * Note: this is a potential source of long boot delays.
	 * For example, it is not atypical to wait 5 seconds here
	 * for the touchpad of a laptop to initialize.
	 */
	wait_for_device_probe();

	md_run_setup();

	if (saved_root_name[0]) {
		root_device_name = saved_root_name;
		if (!strncmp(root_device_name, "mtd", 3) ||
		    !strncmp(root_device_name, "ubi", 3)) {
			mount_block_root(root_device_name, root_mountflags);
			goto out;
		}
		ROOT_DEV = name_to_dev_t(root_device_name);
		if (strncmp(root_device_name, "/dev/", 5) == 0)
			root_device_name += 5;
	}

	if (initrd_load())
		goto out;

	/* wait for any asynchronous scanning to complete */
	if ((ROOT_DEV == 0) && root_wait) {
		printk(KERN_INFO "Waiting for root device %s...\n",
			saved_root_name);
		while (driver_probe_done() != 0 ||
			(ROOT_DEV = name_to_dev_t(saved_root_name)) == 0)
			msleep(5);
		async_synchronize_full();
	}

	is_floppy = MAJOR(ROOT_DEV) == FLOPPY_MAJOR;

	if (is_floppy && rd_doload && rd_load_disk(0))
		ROOT_DEV = Root_RAM0;

	mount_root();
out:
	devtmpfs_mount("dev");
	sys_mount(".", "/", NULL, MS_MOVE, NULL);
	sys_chroot(".");
}
Example #7
/**
 * _config_request - main routine for sending config page requests
 * @ioc: per adapter object
 * @mpi_request: request message frame
 * @mpi_reply: reply mf payload returned from firmware
 * @timeout: timeout in seconds
 * @config_page: contents of the config page
 * @config_page_sz: size of config page
 * Context: sleep
 *
 * A generic API for config page requests to firmware.
 *
 * The ioc->config_cmds.status flag should be MPT2_CMD_NOT_USED before calling
 * this API.
 *
 * The callback index is set inside ioc->config_cb_idx.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
    *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
    void *config_page, u16 config_page_sz)
{
	u16 smid;
	u32 ioc_state;
	unsigned long timeleft;
	Mpi2ConfigRequest_t *config_request;
	int r;
	u8 retry_count, issue_host_reset = 0;
	u16 wait_state_count;
	struct config_request mem;

	mutex_lock(&ioc->config_cmds.mutex);
	if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
		    ioc->name, __func__);
		mutex_unlock(&ioc->config_cmds.mutex);
		return -EAGAIN;
	}

	retry_count = 0;
	memset(&mem, 0, sizeof(struct config_request));

	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	if (config_page) {
		mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
		mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
		mpi_request->Header.PageType = mpi_reply->Header.PageType;
		mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
		mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
		mpi_request->ExtPageType = mpi_reply->ExtPageType;
		if (mpi_request->Header.PageLength)
			mem.sz = mpi_request->Header.PageLength * 4;
		else
			mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
		r = _config_alloc_config_dma_memory(ioc, &mem);
		if (r != 0)
			goto out;
		if (mpi_request->Action ==
		    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
		    mpi_request->Action ==
		    MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
			    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
			    mem.page_dma);
			memcpy(mem.page, config_page, min_t(u16, mem.sz,
			    config_page_sz));
		} else {
			memset(config_page, 0, config_page_sz);
			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
			    MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
		}
	}

 retry_config:
	if (retry_count) {
		if (retry_count > 2) { /* attempt only 2 retries */
			r = -EFAULT;
			goto free_mem;
		}
		printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
		    ioc->name, __func__, retry_count);
	}
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ioc->config_cmds.status = MPT2_CMD_NOT_USED;
			r = -EFAULT;
			goto free_mem;
		}
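		/* poll once per second until the IOC reports the operational state */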
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	smid = mpt2sas_base_get_smid(ioc, ioc->config_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		ioc->config_cmds.status = MPT2_CMD_NOT_USED;
		r = -EAGAIN;
		goto free_mem;
	}

	r = 0;
	memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
	ioc->config_cmds.status = MPT2_CMD_PENDING;
	config_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->config_cmds.smid = smid;
	memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_config_display_some_debug(ioc, smid, "config_request", NULL);
#endif
	init_completion(&ioc->config_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
	    timeout*HZ);
	if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2ConfigRequest_t)/4);
		retry_count++;
		if (ioc->config_cmds.smid == smid)
			mpt2sas_base_free_smid(ioc, smid);
		if ((ioc->shost_recovery) || (ioc->config_cmds.status &
		    MPT2_CMD_RESET) || ioc->pci_error_recovery)
			goto retry_config;
		issue_host_reset = 1;
		r = -EFAULT;
		goto free_mem;
	}

	if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->config_cmds.reply,
		    sizeof(Mpi2ConfigReply_t));
	if (retry_count)
		printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n",
		    ioc->name, __func__, retry_count);
	if (config_page && mpi_request->Action ==
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT)
		memcpy(config_page, mem.page, min_t(u16, mem.sz,
		    config_page_sz));
 free_mem:
	if (config_page)
		_config_free_config_dma_memory(ioc, &mem);
 out:
	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->config_cmds.mutex);

	if (issue_host_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
	return r;
}
Example #8
static void __exit orinoco_tmd_exit(void)
{
	pci_unregister_driver(&orinoco_tmd_driver);
	ssleep(1);
}
Example #9
/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void
nfsd4_probe_callback(struct nfs4_client *clp)
{
	struct sockaddr_in	addr;
	struct nfs4_callback    *cb = &clp->cl_callback;
	struct rpc_timeout	timeparms = {
		.to_initval	= (NFSD_LEASE_TIME/4) * HZ,
		.to_retries	= 5,
		.to_maxval	= (NFSD_LEASE_TIME/2) * HZ,
		.to_exponential	= 1,
	};
	struct rpc_program *	program = &cb->cb_program;
	struct rpc_create_args args = {
		.protocol	= IPPROTO_TCP,
		.address	= (struct sockaddr *)&addr,
		.addrsize	= sizeof(addr),
		.timeout	= &timeparms,
		.program	= program,
		.version	= nfs_cb_version[1]->number,
		.authflavor	= RPC_AUTH_UNIX,	/* XXX: need AUTH_GSS... */
		.flags		= (RPC_CLNT_CREATE_NOPING),
	};
	struct task_struct *t;

	if (atomic_read(&cb->cb_set))
		return;

	/* Initialize address */
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(cb->cb_port);
	addr.sin_addr.s_addr = htonl(cb->cb_addr);

	/* Initialize rpc_program */
	program->name = "nfs4_cb";
	program->number = cb->cb_prog;
	program->nrvers = ARRAY_SIZE(nfs_cb_version);
	program->version = nfs_cb_version;
	program->stats = &cb->cb_stat;

	/* Initialize rpc_stat */
	memset(program->stats, 0, sizeof(cb->cb_stat));
	program->stats->program = program;

	/* Create RPC client */
	cb->cb_client = rpc_create(&args);
	if (IS_ERR(cb->cb_client)) {
		dprintk("NFSD: couldn't create callback client\n");
		goto out_err;
	}

	/* the task holds a reference to the nfs4_client struct */
	atomic_inc(&clp->cl_count);

	t = kthread_run(do_probe_callback, clp, "nfs4_cb_probe");

	if (IS_ERR(t))
		goto out_release_clp;

	return;

out_release_clp:
	atomic_dec(&clp->cl_count);
	rpc_shutdown_client(cb->cb_client);
out_err:
	cb->cb_client = NULL;
	dprintk("NFSD: warning: no callback path to client %.*s\n",
		(int)clp->cl_name.len, clp->cl_name.data);
}

/*
 * called with dp->dl_count inc'ed.
 * nfs4_lock_state() may or may not have been called.
 */
void
nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *clnt = clp->cl_callback.cb_client;
	struct nfs4_cb_recall *cbr = &dp->dl_recall;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
		.rpc_argp = cbr,
	};
	int retries = 1;
	int status = 0;

	if ((!atomic_read(&clp->cl_callback.cb_set)) || !clnt)
		return;

	cbr->cbr_trunc = 0; /* XXX need to implement truncate optimization */
	cbr->cbr_dp = dp;

	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
	while (retries--) {
		switch (status) {
			case -EIO:
				/* Network partition? */
			case -EBADHANDLE:
			case -NFS4ERR_BAD_STATEID:
				/* Race: client probably got cb_recall
				 * before open reply granting delegation */
				break;
			default:
				goto out_put_cred;
		}
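		/* back off for a couple of seconds before retrying the recall */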
		ssleep(2);
		status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
	}
out_put_cred:
	if (status == -EIO)
		atomic_set(&clp->cl_callback.cb_set, 0);
	/* Success or failure, now we're either waiting for lease expiration
	 * or deleg_return. */
	dprintk("NFSD: nfs4_cb_recall: dp %p dl_flock %p dl_count %d\n",dp, dp->dl_flock, atomic_read(&dp->dl_count));
	put_nfs4_client(clp);
	nfs4_put_delegation(dp);
	return;
}
Example #10
int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
	struct scsi_device *SDev;
	struct scsi_sense_hdr sshdr;
	int result, err = 0, retries = 0;
	struct request_sense *sense = cgc->sense;

	SDev = cd->device;

	if (!sense) {
		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
		if (!sense) {
			err = -ENOMEM;
			goto out;
		}
	}

      retry:
	if (!scsi_block_when_processing_errors(SDev)) {
		err = -ENODEV;
		goto out;
	}

	memset(sense, 0, sizeof(*sense));
	result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
			      cgc->buffer, cgc->buflen, (char *)sense,
			      cgc->timeout, IOCTL_RETRIES, 0, NULL);

	scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);

	/* Minimal error checking.  Ignore cases we know about, and report the rest. */
	if (driver_byte(result) != 0) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			SDev->changed = 1;
			if (!cgc->quiet)
				printk(KERN_INFO "%s: disc change detected.\n", cd->cdi.name);
			if (retries++ < 10)
				goto retry;
			err = -ENOMEDIUM;
			break;
		case NOT_READY:	/* This happens if there is no disc in drive */
			if (sshdr.asc == 0x04 &&
			    sshdr.ascq == 0x01) {
				/* sense: Logical unit is in process of becoming ready */
				if (!cgc->quiet)
					printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name);
				if (retries++ < 10) {
					/* sleep 2 sec and try again */
					ssleep(2);
					goto retry;
				} else {
					/* 20 secs are enough? */
					err = -ENOMEDIUM;
					break;
				}
			}
			if (!cgc->quiet)
				printk(KERN_INFO "%s: CDROM not ready.  Make sure there is a disc in the drive.\n", cd->cdi.name);
#ifdef DEBUG
			scsi_print_sense_hdr("sr", &sshdr);
#endif
			err = -ENOMEDIUM;
			break;
		case ILLEGAL_REQUEST:
			err = -EIO;
			if (sshdr.asc == 0x20 &&
			    sshdr.ascq == 0x00)
				/* sense: Invalid command operation code */
				err = -EDRIVE_CANT_DO_THIS;
#ifdef DEBUG
			__scsi_print_command(cgc->cmd);
			scsi_print_sense_hdr("sr", &sshdr);
#endif
			break;
		default:
			printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name);
			__scsi_print_command(cgc->cmd);
			scsi_print_sense_hdr("sr", &sshdr);
			err = -EIO;
		}
	}

	/* Wake up a process waiting for device */
      out:
	if (!cgc->sense)
		kfree(sense);
	cgc->stat = err;
	return err;
}
Example #11
static void __init bt_power_init(void)
{
  gpio_set_value(23, 1);
  ssleep(1); /* 1 sec */
  gpio_set_value(23, 0);
}