Example No. 1
/*----------------------------------------------------------------------
* Name:    ibmphp_hpc_initvars
*
* Action:  initialize semaphores and variables
*---------------------------------------------------------------------*/
void ibmphp_hpc_initvars (void)
{
	debug ("%s - Entry\n", __FUNCTION__);

	init_MUTEX (&sem_hpcaccess);
	init_MUTEX (&semOperations);
	init_MUTEX_LOCKED (&sem_exit);
	init_MUTEX_LOCKED (&sem_poll);
	stop_polling = POLL_YES;
	to_debug = FALSE;
	ibmphp_shutdown = FALSE;
	tid_poll = 0;

	debug ("%s - Exit\n", __FUNCTION__);
}
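The init_MUTEX()/init_MUTEX_LOCKED() macros used throughout these examples were thin wrappers around sema_init() and were removed from the kernel around 2.6.37. As a rough sketch (the function and variable names below are illustrative, not taken from the driver above), the same initialization on a newer kernel looks like this:

#include <linux/semaphore.h>

static struct semaphore sem_hpcaccess;	/* binary semaphore used as a mutex */
static struct semaphore sem_exit;	/* starts locked, used for signalling */

static void example_initvars(void)
{
	sema_init(&sem_hpcaccess, 1);	/* equivalent of init_MUTEX() */
	sema_init(&sem_exit, 0);	/* equivalent of init_MUTEX_LOCKED() */
}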
Example No. 2
static int __init eatingfruit_init(void)
{
	printk("eating fruit module is working..\n");

	//creating four threads
	kernel_thread(father_thread,"father",CLONE_KERNEL);
	kernel_thread(mother_thread,"mother",CLONE_KERNEL);
	kernel_thread(son_thread,"son",CLONE_KERNEL);
	kernel_thread(daughter_thread,"daughter",CLONE_KERNEL);

	init_MUTEX(&mutex);
	init_MUTEX_LOCKED(&apple);
	init_MUTEX_LOCKED(&orange);

	return 0;
}
Example No. 3
/*
 * Set up the platform-dependent fields in the nodepda.
 */
void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
{
    hubinfo_t hubinfo;

    extern void router_map_init(nodepda_t *);
    extern void router_queue_init(nodepda_t *,cnodeid_t);
    extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);

    /* Allocate per-node platform-dependent data */
    hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));

    npda->pdinfo = (void *)hubinfo;
    hubinfo->h_nodepda = npda;
    hubinfo->h_cnodeid = node;

    spin_lock_init(&hubinfo->h_crblock);

    npda->xbow_peer = INVALID_NASID;

    /*
     * Initialize the linked list of
     * router info pointers to the dependent routers
     */
    npda->npda_rip_first = NULL;

    /*
     * npda_rip_last always points to the place
     * where the next element is to be inserted
     * into the list
     */
    npda->npda_rip_last = &npda->npda_rip_first;
    npda->geoid.any.type = GEO_TYPE_INVALID;

    init_MUTEX_LOCKED(&npda->xbow_sema); /* init it locked? */
}
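The npda_rip_first/npda_rip_last pair set up above implements the classic "pointer to the last next-pointer" append idiom. A hypothetical helper showing how it would be used (the router_info_t link-field name rip_next is an assumption, not taken from the SGI headers):

static void nodepda_append_router_info(nodepda_t *npda, router_info_t *rip)
{
	rip->rip_next = NULL;			/* assumed name of the link field */
	*npda->npda_rip_last = rip;		/* hook onto the end of the list */
	npda->npda_rip_last = &rip->rip_next;	/* next insert goes after us */
}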
Example No. 4
static int usb_pmic_mod_init(void)
{
	PMIC_STATUS rs = PMIC_ERROR;

	init_MUTEX_LOCKED(&pmic_mx);

	rs = pmic_convity_open(&pmic_handle, USB);
	if (rs != PMIC_SUCCESS) {
		printk(KERN_ERR "pmic_convity_open returned error %d\n", rs);
		return rs;
	}

	rs = pmic_convity_set_callback(pmic_handle, pmic_event_handler,
				       USB_DETECT_4V4_RISE | USB_DETECT_4V4_FALL
				       | USB_DETECT_2V0_RISE |
				       USB_DETECT_2V0_FALL | USB_DETECT_0V8_RISE
				       | USB_DETECT_0V8_FALL | USB_DETECT_MINI_A
				       | USB_DETECT_MINI_B);

	if (rs != PMIC_SUCCESS) {
		printk(KERN_ERR
		       "pmic_convity_set_callback returned error %d\n", rs);
		return rs;
	}

	return rs;
}
Example No. 5
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		return -ENOMEM;
	}

	memset(ic, 0, sizeof(*ic));

	init_MUTEX_LOCKED(&f->sem);
	f->inocache = ic;
	f->inocache->nlink = 1;
	f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
	f->inocache->ino = ++c->highest_ino;
	f->inocache->state = INO_STATE_PRESENT;

	ri->ino = cpu_to_je32(f->inocache->ino);

	D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
	jffs2_add_ino_cache(c, f->inocache);

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(PAD(sizeof(*ri)));
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
	ri->mode = cpu_to_jemode(mode);

	f->highest_version = 1;
	ri->version = cpu_to_je32(f->highest_version);

	return 0;
}
Example No. 6
static int __init wixevent_init ( void ) {
	struct proc_dir_entry *entry;

	WixEventHead = WixEventTail = NULL;
	wixEventCount = 0;
	init_MUTEX_LOCKED( &wixevent_semaphore );
	up( &wixevent_semaphore );

	if ( (wixevent_devmajor = register_chrdev(EVENT_MAJOR, DRV_NAME,&wixevent_fops)) < 0 ) {
		WIXPRINT( "unable to get major %d for wixevent device\n", EVENT_MAJOR );
		return -1;
	}
	WIXDEBUG( "wixevent major %d for the wixevent devs\n", wixevent_devmajor );

	wixevent_class = class_create( THIS_MODULE, "wixevent" );
	//class_device_create( wixevent_class, MKDEV(wixevent_devmajor,0), NULL, DRV_NAME, 0 );
	class_device_create( wixevent_class, NULL, MKDEV(wixevent_devmajor,0), NULL, DRV_NAME );

	proc_wixevent = proc_mkdir( "wixevent", NULL );
	entry = create_proc_entry( "count", 0, proc_wixevent );
	if ( entry ) {
		entry->read_proc = proc_wixevent_count_read;
		entry->data = NULL;
	}

	return 0;
}
Example No. 7
int pciehp_event_start_thread(void)
{
	int pid;

	/* initialize our semaphores */
	init_MUTEX_LOCKED(&event_exit);
	event_finished=0;

	init_MUTEX_LOCKED(&event_semaphore);
	pid = kernel_thread(event_thread, NULL, 0);

	if (pid < 0) {
		err ("Can't start up our event thread\n");
		return -1;
	}
	return 0;
}
Example No. 8
static int procon_init(void){
	printk(KERN_INFO"show producer and consumer\n");
	init_MUTEX(&sem_producer);
	init_MUTEX_LOCKED(&sem_consumer);
	atomic_set(&num,0);
	kernel_thread(producer,product,CLONE_KERNEL);
	kernel_thread(consumer,product,CLONE_KERNEL);
	return 0;
}
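The producer and consumer thread bodies are not part of the excerpt, but the intent of the two semaphores is clear: sem_producer starts unlocked so the producer runs first, and sem_consumer starts locked so the consumer blocks until something has been produced. A hypothetical sketch of the two functions (the names and the use of the atomic counter are assumptions):

static int producer(void *data)
{
	for (;;) {
		down(&sem_producer);	/* wait until there is room */
		atomic_inc(&num);	/* "produce" one item */
		printk(KERN_INFO "produced, num=%d\n", atomic_read(&num));
		up(&sem_consumer);	/* wake the consumer */
	}
	return 0;
}

static int consumer(void *data)
{
	for (;;) {
		down(&sem_consumer);	/* blocks until an item exists */
		atomic_dec(&num);	/* "consume" it */
		printk(KERN_INFO "consumed, num=%d\n", atomic_read(&num));
		up(&sem_producer);	/* let the producer refill */
	}
	return 0;
}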
Example No. 9
static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		init_MUTEX_LOCKED(&ei->sem);
		inode_init_once(&ei->vfs_inode);
	}
}
Example No. 10
/*----------------------------------------------------------------------
* Name:    ibmphp_hpc_initvars
*
* Action:  initialize semaphores and variables
*---------------------------------------------------------------------*/
void __init ibmphp_hpc_initvars (void)
{
	debug ("%s - Entry\n", __func__);

	mutex_init(&sem_hpcaccess);
	init_MUTEX (&semOperations);
	init_MUTEX_LOCKED (&sem_exit);
	to_debug = 0;

	debug ("%s - Exit\n", __func__);
}
Example No. 11
/*
 * machine_check_init
 *
 * initialize machine check handling
 */
static int
machine_check_init(void)
{
    init_MUTEX_LOCKED(&m_sem);
    ctl_clear_bit(14, 25);	/* disable damage MCH */
    ctl_set_bit(14, 26);	/* enable degradation MCH */
    ctl_set_bit(14, 27);	/* enable system recovery MCH */
#ifdef CONFIG_MACHCHK_WARNING
    ctl_set_bit(14, 24);	/* enable warning MCH */
#endif
    return 0;
}
Example No. 12
//
// initialize machine check handling
//
void s390_init_machine_check( void )
{
	init_MUTEX_LOCKED( &s_sem[0] );
	init_MUTEX_LOCKED( &s_sem[1] );

#if 0
	//
	// fix me ! initialize a machine check queue with 100 elements
	//
#ifdef S390_MACHCHK_DEBUG
	printk( "init_mach : starting kernel thread\n");
#endif	

	kernel_thread( s390_machine_check_handler, s_sem, 0);

	//
	// wait for the machine check handler to be ready
	//
#ifdef S390_MACHCHK_DEBUG
	printk( "init_mach : waiting for kernel thread\n");
#endif	

	down( &s_sem[0]);

#ifdef S390_MACHCHK_DEBUG
	printk( "init_mach : kernel thread ready\n");
#endif	

	//
	// fix me ! we have to initialize CR14 to allow for CRW pending
	//           conditions

	//
	// fix me ! enable machine checks in the PSW
	//
#endif
	return;
}
Example No. 13
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		
		spin_lock_irqsave(&dev->fib_lock, flags);
		
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
Example No. 14
/*! otg_task_init
 *@brief Create otg task structure, create workqueue, initialize it.
 *@param name - name of task or workqueue
 *@param proc - handler
 *@param data - parameter pointer for handler
 *@param tag-
 *@return initialized otg_task instance pointer
 */
struct otg_task *otg_task_init2(char *name, otg_task_proc_t proc, otg_task_arg_t data, otg_tag_t tag)
{
        struct otg_task *task;

        //TRACE_STRING(tag, "INIT: %s", name);

        RETURN_NULL_UNLESS((task = CKMALLOC(sizeof (struct otg_task))));

        task->tag = tag;
        task->data = data;
        task->name = name;
        task->proc = proc;

        #if defined(OTG_TASK_WORK)
        task->terminated = task->terminate = TRUE;
        #else /* defined(OTG_TASK_WORK) */
        task->terminated = task->terminate = FALSE;
        #if defined(LINUX26)
        THROW_UNLESS((task->work_queue = create_singlethread_workqueue(name)), error);
        #else /* LINUX26 */
        THROW_UNLESS((task->work_queue = create_workqueue(name)), error);
        #endif /* LINUX26 */
        init_MUTEX_LOCKED(&task->admin_sem);
        init_MUTEX_LOCKED(&task->work_sem);
        #endif /* defined(OTG_TASK_WORK) */

        INIT_WORK(&task->work, otg_task_proc, task);

        return task;

        CATCH(error) {
                printk(KERN_INFO"%s: ERROR\n", __FUNCTION__);
                if (task) LKFREE(task);
                return NULL;
        }
}
Example No. 15
module_t *module_add_node(moduleid_t id, cnodeid_t n)
{
    module_t	       *m;
    int			i;

    DPRINTF("module_add_node: id=%x node=%d\n", id, n);

    if ((m = module_lookup(id)) == 0) {
#ifndef CONFIG_IA64_SGI_IO
	m = kmem_zalloc_node(sizeof (module_t), KM_NOSLEEP, n);
#else
	m = kmalloc(sizeof (module_t), GFP_KERNEL);
	memset(m, 0 , sizeof(module_t));
	printk("Module nodecnt = %d\n", m->nodecnt); 
#endif
	ASSERT_ALWAYS(m);

	DPRINTF("module_add_node: m=0x%p\n", m);

	m->id = id;
	spin_lock_init(&m->lock);

	init_MUTEX_LOCKED(&m->thdcnt);

printk("Set elsc to 0x%p on node %d\n", &m->elsc, get_nasid());

set_elsc(&m->elsc);
	elsc_init(&m->elsc, COMPACT_TO_NASID_NODEID(n));
	spin_lock_init(&m->elsclock);

	/* Insert in sorted order by module number */

	for (i = nummodules; i > 0 && modules[i - 1]->id > id; i--)
	    modules[i] = modules[i - 1];

	modules[i] = m;
	nummodules++;
    }

    m->nodes[m->nodecnt++] = n;

printk("module_add_node: module %x now has %d nodes\n", id, m->nodecnt);
    DPRINTF("module_add_node: module %x now has %d nodes\n", id, m->nodecnt);

    return m;
}
Example No. 16
int monitor_hotplug(int action_ndx)
{
    /* This should probably be serialized - if PM runs in a separate
       context, it would seem possible for someone to "rmmod usbdmonitor"
       (e.g. via "at") at the same time PM decides to suspend.
       Unfortunately, there is no airtight way to accomplish that inside
       this module - once PM has called the registered fn, the "race"
       is on :(. */
    int rc;
    if (action_ndx < 0 || action_ndx > MHA_UNLOAD) {
        return(-EINVAL);
    }
    if (monitor_exiting) {
        if (MHA_UNLOAD != action_ndx) {
            return(-EINVAL);
	}
	if (MONITOR_UNLOADED == monitor.status ||
            MONITOR_UNLOADING == monitor.status) {
            /* No need to do it again... */
            return(0);
        }
    }
    printk(KERN_DEBUG "monitor_hotplug: agent: usbd interface: monitor action: %s\n", hotplug_actions[action_ndx]);
#if defined(CONFIG_USBD_PROCFS) && defined(CONFIG_PM)
    /* Sync - fire up the script and wait for it to echo something to
              /proc/usb-monitor (or else PM SUSPEND may not work) */
    init_MUTEX_LOCKED(&hotplug_done);
    /* fire up the script */
    rc = hotplug("usbd", "monitor", hotplug_actions[action_ndx]);
    if (0 == rc) {
        /* wait for the nudge from a write to /proc/usb-monitor */
        init_timer(&hotplug_timeout);
        hotplug_timeout.data = 0;
        hotplug_timeout.function = hotplug_sync_over;
        hotplug_timeout.expires = jiffies + HOTPLUG_SYNC_TIMEOUT;
        add_timer(&hotplug_timeout);
        down_interruptible(&hotplug_done);
        del_timer(&hotplug_timeout);
    }
#else
    /* Async - fire up the script and return */
    rc = hotplug("usbd", "monitor", hotplug_actions[action_ndx]);
#endif
    return(rc);
}
Example No. 17
int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i<0)
		return -ENOMEM;
		
	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) 
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
Example No. 18
static int my_stop(void)
{
    //shutdown kthread
    lock_kernel();
    init_MUTEX_LOCKED(&sleep_sem);
    mb();
    terminate = 1;
    mb();
    kill_proc(thread->pid, SIGKILL, 1);
    flush_scheduled_work();
    down(&sleep_sem);
    kill_proc(2, SIGCHLD, 1);
    
    remove_proc_entry(KB_PROC_LOCATION, 0);
    
    input_unregister_handler(&kbinput_handler);
 	
    return 0;
}
Example No. 19
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	init_MUTEX_LOCKED(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree (f);
	return ret;
}
Example No. 20
static int open_getadapter_fib(struct aac_dev * dev, void *arg)
{
	struct aac_fib_context * fibctx;
	int status;
	unsigned long flags;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fibs);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's 
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg,  &fibctx, sizeof(struct aac_fib_context *))) {
			status = -EFAULT;
		} else {
			status = 0;
		}	
	}
	return status;
}
Example No. 21
/* This must only ever be called when no GC thread is currently running */
int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
{
	pid_t pid;
	int ret = 0;

	if (c->gc_task)
		BUG();

	init_MUTEX_LOCKED(&c->gc_thread_start);
	init_completion(&c->gc_thread_exit);

	pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
	if (pid < 0) {
		printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid);
		complete(&c->gc_thread_exit);
		ret = pid;
	} else {
		/* Wait for it... */
		D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
		down(&c->gc_thread_start);
	}
 
	return ret;
}
Example No. 22
int hwSimInit(void)		//initialize hw registers, data mem, and so on.
{
    memset(&hwSimInfo, 0, sizeof(hwSimInfo));
    memset(&hwDeIrqInfo, 0, sizeof(hwDeIrqInfo));
    
    //malloc register space and init with default data.
    hwSimInfo.pRegBaseAddr = kmalloc(sizeof(hwRegGroup), GFP_KERNEL);
    if(hwSimInfo.pRegBaseAddr == NULL)
    {
        DV_DBG("%s: no mem for regSpace!\n", __FUNCTION__);
        return DV_ER_NOMEM;
    }
    
    memcpy(hwSimInfo.pRegBaseAddr, hwRegGroup, sizeof(hwRegGroup));
    hwSimInfo.regSpaceLen = sizeof(hwRegGroup);

    //malloc data src space and zero it.
    hwSimInfo.pDataSrcAddr = kmalloc(DATA_SRC_SPACE_SIZE, GFP_KERNEL | __GFP_ZERO);
    if(hwSimInfo.pDataSrcAddr == NULL)
    {
        DV_DBG("%s: no mem for dataDestSpace!\n", __FUNCTION__);
        kfree(hwSimInfo.pRegBaseAddr);
        hwSimInfo.pRegBaseAddr = NULL;
        
        return DV_ER_NOMEM;
    }
    hwSimInfo.dataSrcSpaceLen = DATA_SRC_SPACE_SIZE;
    
    //malloc data dest space and zero it.
    hwSimInfo.pDataDestAddr = kmalloc(DATA_DEST_SPACE_SIZE, GFP_KERNEL | __GFP_ZERO);
    if(hwSimInfo.pDataDestAddr == NULL)
    {
        DV_DBG("%s: no mem for dataDestSpace!\n", __FUNCTION__);
        kfree(hwSimInfo.pDataSrcAddr);
        hwSimInfo.pDataSrcAddr = NULL;
        kfree(hwSimInfo.pRegBaseAddr);
        hwSimInfo.pRegBaseAddr = NULL;
        
        return DV_ER_NOMEM;
    }
    hwSimInfo.dataDestSpaceLen = DATA_DEST_SPACE_SIZE;
     
    //malloc data hook space and zero it.
    hwSimInfo.pDataHookAddr = kmalloc(DATA_HOOK_SPACE_SIZE, GFP_KERNEL | __GFP_ZERO);
    if(hwSimInfo.pDataHookAddr == NULL)
    {
        DV_DBG("%s: no mem for dataDestSpace!\n", __FUNCTION__);
        kfree(hwSimInfo.pDataDestAddr);
        hwSimInfo.pDataDestAddr = NULL;
        
        kfree(hwSimInfo.pDataSrcAddr);
        hwSimInfo.pDataSrcAddr = NULL;
        
        kfree(hwSimInfo.pRegBaseAddr);
        hwSimInfo.pRegBaseAddr = NULL;
        
        return DV_ER_NOMEM;
    }
    hwSimInfo.dataHookSpaceLen = DATA_HOOK_SPACE_SIZE;
    //printk("read32: %x\n", hwDeRegRead32(0xB01C000C));

    init_MUTEX(&hwSimInfo.regLock);
    init_MUTEX(&hwSimInfo.dataSrcLock);
    init_MUTEX(&hwSimInfo.dataDestLock);
    init_MUTEX(&hwSimInfo.dataHookLock);
    init_MUTEX_LOCKED(&hwSimInfo.simIsDone);

    hwSimInfo.pHwSimWq = create_workqueue("hwSimWq");
    INIT_WORK(&hwSimInfo.dvDataWork, hwDataRxInProcess);
    INIT_WORK(&hwSimInfo.deDataWork, hwDataTxOutProcess);
    
    hwSimInfo.state = HW_SIM_STATE_IDLE;
    
    return DV_OK;
}
Example No. 23
static int __init mc32_probe1(struct net_device *dev, int slot)
{
    static unsigned version_printed;
    int i, err;
    u8 POS;
    u32 base;
    struct mc32_local *lp = netdev_priv(dev);
    static u16 mca_io_bases[]= {
        0x7280,0x7290,
        0x7680,0x7690,
        0x7A80,0x7A90,
        0x7E80,0x7E90
    };
    static u32 mca_mem_bases[]= {
        0x00C0000,
        0x00C4000,
        0x00C8000,
        0x00CC000,
        0x00D0000,
        0x00D4000,
        0x00D8000,
        0x00DC000
    };
    static char *failures[]= {
        "Processor instruction",
        "Processor data bus",
        "Processor data bus",
        "Processor data bus",
        "Adapter bus",
        "ROM checksum",
        "Base RAM",
        "Extended RAM",
        "82586 internal loopback",
        "82586 initialisation failure",
        "Adapter list configuration error"
    };

    /* Time to play MCA games */

    if (mc32_debug  &&  version_printed++ == 0)
        printk(KERN_DEBUG "%s", version);

    printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);

    POS = mca_read_stored_pos(slot, 2);

    if(!(POS&1))
    {
        printk(" disabled.\n");
        return -ENODEV;
    }

    /* Fill in the 'dev' fields. */
    dev->base_addr = mca_io_bases[(POS>>1)&7];
    dev->mem_start = mca_mem_bases[(POS>>4)&7];

    POS = mca_read_stored_pos(slot, 4);
    if(!(POS&1))
    {
        printk("memory window disabled.\n");
        return -ENODEV;
    }

    POS = mca_read_stored_pos(slot, 5);

    i=(POS>>4)&3;
    if(i==3)
    {
        printk("invalid memory window.\n");
        return -ENODEV;
    }

    i*=16384;
    i+=16384;

    dev->mem_end=dev->mem_start + i;

    dev->irq = ((POS>>2)&3)+9;

    if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
    {
        printk("io 0x%3lX, which is busy.\n", dev->base_addr);
        return -EBUSY;
    }

    printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
           dev->base_addr, dev->irq, dev->mem_start, i/1024);


    /* We ought to set the cache line size here.. */


    /*
     *	Go PROM browsing
     */

    /* Retrieve and print the ethernet address. */
    for (i = 0; i < 6; i++)
    {
        mca_write_pos(slot, 6, i+12);
        mca_write_pos(slot, 7, 0);

        dev->dev_addr[i] = mca_read_pos(slot,3);
    }

    printk("%s: Address %pM", dev->name, dev->dev_addr);

    mca_write_pos(slot, 6, 0);
    mca_write_pos(slot, 7, 0);

    POS = mca_read_stored_pos(slot, 4);

    if(POS&2)
        printk(" : BNC port selected.\n");
    else
        printk(" : AUI port selected.\n");

    POS=inb(dev->base_addr+HOST_CTRL);
    POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
    POS&=~HOST_CTRL_INTE;
    outb(POS, dev->base_addr+HOST_CTRL);
    /* Reset adapter */
    udelay(100);
    /* Reset off */
    POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
    outb(POS, dev->base_addr+HOST_CTRL);

    udelay(300);

    /*
     *	Grab the IRQ
     */

    err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
    if (err) {
        release_region(dev->base_addr, MC32_IO_EXTENT);
        printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
        goto err_exit_ports;
    }

    memset(lp, 0, sizeof(struct mc32_local));
    lp->slot = slot;

    i=0;

    base = inb(dev->base_addr);

    while(base == 0xFF)
    {
        i++;
        if(i == 1000)
        {
            printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
            err = -ENODEV;
            goto err_exit_irq;
        }
        udelay(1000);
        if(inb(dev->base_addr+2)&(1<<5))
            base = inb(dev->base_addr);
    }

    if(base>0)
    {
        if(base < 0x0C)
            printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
                   base<0x0A?" test failure":"");
        else
            printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
        err = -ENODEV;
        goto err_exit_irq;
    }

    base=0;
    for(i=0; i<4; i++)
    {
        int n=0;

        while(!(inb(dev->base_addr+2)&(1<<5)))
        {
            n++;
            udelay(50);
            if(n>100)
            {
                printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
                err = -ENODEV;
                goto err_exit_irq;
            }
        }

        base|=(inb(dev->base_addr)<<(8*i));
    }

    lp->exec_box=isa_bus_to_virt(dev->mem_start+base);

    base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];

    lp->base = dev->mem_start+base;

    lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
    lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);

    lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);

    /*
     *	Descriptor chains (card relative)
     */

    lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
    lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
    lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
    lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */

    init_MUTEX_LOCKED(&lp->cmd_mutex);
    init_completion(&lp->execution_cmd);
    init_completion(&lp->xceiver_cmd);

    printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
           dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);

    dev->open		= mc32_open;
    dev->stop		= mc32_close;
    dev->hard_start_xmit	= mc32_send_packet;
    dev->get_stats		= mc32_get_stats;
    dev->set_multicast_list = mc32_set_multicast_list;
    dev->tx_timeout		= mc32_timeout;
    dev->watchdog_timeo	= HZ*5;	/* Board does all the work */
    dev->ethtool_ops	= &netdev_ethtool_ops;

    return 0;

err_exit_irq:
    free_irq(dev->irq, dev);
err_exit_ports:
    release_region(dev->base_addr, MC32_IO_EXTENT);
    return err;
}
Example No. 24
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
Example No. 25
/**
 * driver_register - register driver with bus
 * @drv: driver to register
 *
 * We pass off most of the work to the bus_add_driver() call,
 * since most of the things we have to do deal with the bus
 * structures.
 *
 * The one interesting aspect is that we initialize @drv->unload_sem
 * to a locked state here. It will be unlocked when the driver
 * reference count reaches 0.
 */
int driver_register(struct device_driver * drv)
{
    INIT_LIST_HEAD(&drv->devices);
    init_MUTEX_LOCKED(&drv->unload_sem);
    return bus_add_driver(drv);
}
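The comment implies a matching release path: whoever drops the last reference up()s unload_sem, and driver_unregister() down()s it so its caller can safely free the driver structure. A hedged sketch of that counterpart (illustrative names, not the verbatim historical code):

static void driver_release_sketch(struct device_driver *drv)
{
	up(&drv->unload_sem);		/* reference count reached zero */
}

void driver_unregister_sketch(struct device_driver *drv)
{
	bus_remove_driver(drv);
	down(&drv->unload_sem);		/* block until the last user is gone */
}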
Example No. 26
/*
 * Separate initcall needed for semaphore initialization since
 * crw_handle_channel_report might be called before crw_machine_check_init.
 */
static int __init crw_init_semaphore(void)
{
	init_MUTEX_LOCKED(&crw_semaphore);
	return 0;
}
Example No. 27
static int __devinit
dm3730logic_cf_alloc(struct platform_device *pdev, int id, unsigned long physaddr,
		unsigned long physize, int irq, int gpio, int bus_width)
{
	struct device *dev = &pdev->dev;
	struct dm3730logic_cf_data *cf_data = dev->platform_data;
	struct cf_device *cf;
	struct request_queue *rq;
	int rc;

	DPRINTK(DEBUG_CF_GENDISK, "%s: dev %p\n", __FUNCTION__, dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the cf device structure */
	cf = kzalloc(sizeof(struct cf_device), GFP_KERNEL);
	if (!cf) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	platform_set_drvdata(pdev, cf);

	cf->dev = dev;
	cf->id = id;
	cf->physaddr = physaddr;
	cf->physize = physize;
	cf->irq = irq;
	cf->gpio_cd = cf_data->gpio_cd;
	cf->gpio_reset = cf_data->gpio_reset;
	cf->gpio_en = cf_data->gpio_en;
	cf->bus_width = bus_width;

	/* We fake it as ejected to start with */
	cf->ejected = 1;

	rq = blk_init_queue(cf_request, &cf->blk_lock);
	if (rq == NULL) {
		DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);
		return -ENOMEM;
	}
	blk_queue_logical_block_size(rq, 512);

	// Limit requests to simple contiguous ones
	blk_queue_max_sectors(rq, 8);  //4KB
	blk_queue_max_phys_segments(rq, 1);
	blk_queue_max_hw_segments(rq, 1);

	cf->queue = rq;

	// The IRQ semaphore is locked and only in the IRQ is it released
	init_MUTEX_LOCKED(&cf->irq_sem);

	/* The RW semaphore to have only one call into either read/write
	 * at a time */
	init_MUTEX(&cf->rw_sem);

	init_completion(&cf->task_completion);

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	// Create the thread that sits and waits for an interrupt
	rc = kernel_thread(cf_thread, cf, CLONE_KERNEL);
	if (rc < 0) {
		printk("%s:%d thread create fail! %d\n", __FUNCTION__, __LINE__, rc);
		goto err_setup;
	} else {
		wait_for_completion(&cf->task_completion);
	}

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	/* Call the setup code */
	rc = dm3730logic_cf_setup(cf);
	if (rc)
		goto err_setup;

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	dev_set_drvdata(dev, cf);


	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(cf);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}
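Neither cf_thread() nor the interrupt handler appears in the excerpt, but the comments spell out the contract: the IRQ handler only releases irq_sem, and the kernel thread sleeps on it so the actual card servicing runs in process context. A hypothetical sketch of both sides (the _sketch names are not from the driver):

static irqreturn_t cf_interrupt_sketch(int irq, void *dev_id)
{
	struct cf_device *cf = dev_id;

	up(&cf->irq_sem);		/* wake the worker thread */
	return IRQ_HANDLED;
}

static int cf_thread_sketch(void *data)
{
	struct cf_device *cf = data;

	complete(&cf->task_completion);	/* dm3730logic_cf_alloc() waits for this */
	for (;;) {
		down(&cf->irq_sem);	/* sleep until the next interrupt */
		/* ... acknowledge and service the CF card ... */
	}
	return 0;
}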
Example No. 28
static int __init ipcs_module_init(void)
{
  int rc = 0;
  int readyChkCnt = 0;
  struct timespec startTime, endTime;
  
  IPC_DEBUG(DBG_INFO,"[ipc]: ipcs_module_init start..\n");
  
  init_MUTEX_LOCKED(&g_ipc_info.ipc_sem);

  g_ipc_info.ipc_state = 0;

  g_ipc_info.devnum = MKDEV(IPC_MAJOR, 0);
  
  rc = register_chrdev_region(g_ipc_info.devnum, 1, "bcm_fuse_ipc");
  if (rc < 0) 
  {
    IPC_DEBUG(DBG_ERROR,"Error registering the IPC device\n");
    goto out;
  }

  cdev_init(&g_ipc_info.cdev, &ipc_ops);
  
  g_ipc_info.cdev.owner = THIS_MODULE;

  rc = cdev_add(&g_ipc_info.cdev, g_ipc_info.devnum, 1);
  if (rc) 
  {
    IPC_DEBUG(DBG_ERROR,"[ipc]: cdev_add errpr\n");
    goto out_unregister;
  }

  IPC_DEBUG(DBG_INFO, "[ipc]: create_workqueue\n");
 
  INIT_WORK(&g_ipc_info.cp_crash_dump_wq, ProcessCPCrashedDump);
  INIT_WORK(&g_ipc_info.intr_work, ipcs_intr_workqueue_process);

  g_ipc_info.intr_workqueue = create_workqueue("ipc-wq");
  if (!g_ipc_info.intr_workqueue)
  {
    IPC_DEBUG(DBG_ERROR,"[ipc]: cannot create workqueue\n");
    goto out_unregister;
  } 


  IPC_DEBUG(DBG_INFO, "[ipc]: request_irq\n");
  rc = request_irq(IRQ_IPC_C2A, ipcs_interrupt, IRQF_NO_SUSPEND, "ipc-intr", &g_ipc_info);
  if (rc) 
  {
    IPC_DEBUG(DBG_ERROR,"[ipc]: request_irq error\n");
    goto out_del;
  }
  
  /**
     Make sure this is not cache'd because CP has to know about any changes
     we write to this memory immediately.
   */
  IPC_DEBUG(DBG_INFO, "[ipc]: ioremap_nocache IPC_BASE\n");
  g_ipc_info.apcp_shmem = ioremap_nocache(IPC_BASE, IPC_SIZE);
  if (!g_ipc_info.apcp_shmem) 
  {
    rc = -ENOMEM;
    IPC_DEBUG(DBG_ERROR,"[ipc]: Could not map shmem\n");
    goto out_del;
  }
#ifdef CONFIG_HAS_WAKELOCK
  wake_lock_init(&ipc_wake_lock, WAKE_LOCK_SUSPEND, "ipc_wake_lock");
#endif

  IPC_DEBUG(DBG_INFO, "[ipc]: ipcs_init\n");
  if (ipcs_init((void *)g_ipc_info.apcp_shmem, IPC_SIZE))
  {
    rc = -1;
    IPC_DEBUG(DBG_ERROR,"[ipc]: ipcs_init() failed\n");
    goto out_del;
  }
  
  if ( sEarlyCPInterrupt )
  {
    IPC_DEBUG(DBG_INFO,"[ipc]: early CP interrupt - doing crash dump...\n");
#ifdef CONFIG_HAS_WAKELOCK
    wake_lock(&ipc_wake_lock);
#endif
    schedule_work(&g_ipc_info.cp_crash_dump_wq);
  }
  
  // check for AP only boot mode
  if ( AP_ONLY_BOOT == get_ap_boot_mode() )
  {
      IPC_DEBUG(DBG_INFO,"[ipc]: AP only boot - not waiting for CP\n");
  }
  else
  {
    // wait for CP to have IPC setup as well; if we exit module init
    // before IPC is ready, RPC module will likely crash during its
    // own init
    startTime = current_kernel_time();
    while ( !g_ipc_info.ipc_state )
    {
      IPC_DEBUG(DBG_INFO, "[ipc]: CP IPC not ready, sleeping...\n");
      msleep(20);
      readyChkCnt++;
      if ( readyChkCnt > 100 )
      {
        IPC_DEBUG(DBG_ERROR, "[ipc]: IPC init timeout - no response from CP\n");
        rc = -1;
        goto out_del;
      }
    }
    endTime = current_kernel_time();
    IPC_DEBUG(DBG_INFO,"readyChkCnt=%d time=%ldus\n", readyChkCnt,
          ((endTime.tv_sec - startTime.tv_sec)*1000000L+(endTime.tv_nsec - startTime.tv_nsec)/1000L));

    IPC_DEBUG(DBG_INFO,"[ipc]: ipcs_module_init ok\n");
  }
    
  return 0;

out_del:
  cdev_del(&g_ipc_info.cdev);
out_unregister:
  unregister_chrdev_region(g_ipc_info.devnum, 1);
out:
  IPC_DEBUG(DBG_ERROR,"IPC Driver Failed to initialise!\n");
  return rc;
}
Example No. 29
PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool, const char *thread_name,
				      pj_thread_proc *proc, void *arg,
				      pj_size_t stack_size, unsigned flags,
				      pj_thread_t **ptr_thread)
{
    pj_thread_t *thread;

    TRACE_((THIS_FILE, "pj_thread_create()"));
    
    PJ_ASSERT_RETURN(pool && proc && ptr_thread, PJ_EINVAL);

    thread = pj_pool_zalloc(pool, sizeof(pj_thread_t));
    if (!thread)
	return PJ_ENOMEM;

    PJ_UNUSED_ARG(stack_size);

    /* Thread name. */
    if (!thread_name) 
	thread_name = "thr%p";
    
    if (strchr(thread_name, '%')) {
	pj_snprintf(thread->obj_name, PJ_MAX_OBJ_NAME, thread_name, thread);
    } else {
	strncpy(thread->obj_name, thread_name, PJ_MAX_OBJ_NAME);
	thread->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
    }
    
    /* Init thread's semaphore. */
    TRACE_((THIS_FILE, "...init semaphores..."));
    init_MUTEX_LOCKED(&thread->startstop_sem);
    init_MUTEX_LOCKED(&thread->suspend_sem);

    thread->flags = flags;

    if ((flags & PJ_THREAD_SUSPENDED) == 0) {
	up(&thread->suspend_sem);
    }

    /* Store the functions and argument. */
    thread->func = proc;
    thread->arg = arg;
    
    /* Save return value. */
    *ptr_thread = thread;
    
    /* Create the new thread by running a task through keventd. */

#if 0
    /* Initialize the task queue struct. */
    thread->tq.sync = 0;
    INIT_LIST_HEAD(&thread->tq.list);
    thread->tq.routine = kthread_launcher;
    thread->tq.data = thread;

    /* and schedule it for execution. */
    schedule_task(&thread->tq);
#endif
    kthread_launcher(thread);

    /* Wait until thread has reached the setup_thread routine. */
    TRACE_((THIS_FILE, "...wait for the new thread..."));
    down(&thread->startstop_sem);

    TRACE_((THIS_FILE, "...main thread resumed..."));
    return PJ_SUCCESS;
}
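The spawned thread's side of this handshake is not shown. Based on the two semaphores above, it would up() startstop_sem once its own setup is done and, when PJ_THREAD_SUSPENDED was requested, block on suspend_sem before calling the user's proc. A hypothetical sketch of that entry point (name and ordering are assumptions):

static int thread_entry_sketch(void *arg)
{
    pj_thread_t *thread = arg;

    /* let pj_thread_create() return from down(&startstop_sem) */
    up(&thread->startstop_sem);

    /* if created suspended, wait here until someone ups suspend_sem */
    down(&thread->suspend_sem);

    /* run the user-supplied thread procedure */
    (*thread->func)(thread->arg);

    return 0;
}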
Example No. 30
int usb_stor_CBI_transport(Scsi_Cmnd *srb, struct us_data *us)
{
	int result;

	/* Set up for status notification */
	atomic_set(us->ip_wanted, 1);

	/* re-initialize the mutex so that we avoid any races with
	 * early/late IRQs from previous commands */
	init_MUTEX_LOCKED(&(us->ip_waitq));

	/* COMMAND STAGE */
	/* let's send the command via the control pipe */
	result = usb_stor_control_msg(us, usb_sndctrlpipe(us->pusb_dev,0),
				      US_CBI_ADSC, 
				      USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 
				      us->ifnum, srb->cmnd, srb->cmd_len);

	/* check the return code for the command */
	US_DEBUGP("Call to usb_stor_control_msg() returned %d\n", result);
	if (result < 0) {
		/* Reset flag for status notification */
		atomic_set(us->ip_wanted, 0);
	}

	/* if the command was aborted, indicate that */
	if (result == -ECONNRESET)
		return USB_STOR_TRANSPORT_ABORTED;

	/* STALL must be cleared when it is detected */
	if (result == -EPIPE) {
		US_DEBUGP("-- Stall on control pipe. Clearing\n");
		result = usb_stor_clear_halt(us,	
			usb_sndctrlpipe(us->pusb_dev, 0));

		/* if the command was aborted, indicate that */
		if (result == -ECONNRESET)
			return USB_STOR_TRANSPORT_ABORTED;
		return USB_STOR_TRANSPORT_FAILED;
	}

	if (result < 0) {
		/* Uh oh... serious problem here */
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* DATA STAGE */
	/* transfer the data payload for this command, if one exists*/
	if (usb_stor_transfer_length(srb)) {
		usb_stor_transfer(srb, us);
		result = srb->result;
		US_DEBUGP("CBI data stage result is 0x%x\n", result);

		/* report any errors */
		if (result == US_BULK_TRANSFER_ABORTED) {
			atomic_set(us->ip_wanted, 0);
			return USB_STOR_TRANSPORT_ABORTED;
		}
		if (result == US_BULK_TRANSFER_FAILED) {
			atomic_set(us->ip_wanted, 0);
			return USB_STOR_TRANSPORT_FAILED;
		}
	}

	/* STATUS STAGE */

	/* go to sleep until we get this interrupt */
	US_DEBUGP("Current value of ip_waitq is: %d\n", atomic_read(&us->ip_waitq.count));
	down(&(us->ip_waitq));

	/* if we were woken up by an abort instead of the actual interrupt */
	if (atomic_read(us->ip_wanted)) {
		US_DEBUGP("Did not get interrupt on CBI\n");
		atomic_set(us->ip_wanted, 0);
		return USB_STOR_TRANSPORT_ABORTED;
	}

	US_DEBUGP("Got interrupt data (0x%x, 0x%x)\n", 
			us->irqdata[0], us->irqdata[1]);

	/* UFI gives us ASC and ASCQ, like a request sense
	 *
	 * REQUEST_SENSE and INQUIRY don't affect the sense data on UFI
	 * devices, so we ignore the information for those commands.  Note
	 * that this means we could be ignoring a real error on these
	 * commands, but that can't be helped.
	 */
	if (us->subclass == US_SC_UFI) {
		if (srb->cmnd[0] == REQUEST_SENSE ||
		    srb->cmnd[0] == INQUIRY)
			return USB_STOR_TRANSPORT_GOOD;
		else
			if (((unsigned char*)us->irq_urb->transfer_buffer)[0])
				return USB_STOR_TRANSPORT_FAILED;
			else
				return USB_STOR_TRANSPORT_GOOD;
	}

	/* If not UFI, we interpret the data as a result code 
	 * The first byte should always be a 0x0
	 * The second byte & 0x0F should be 0x0 for good, otherwise error 
	 */
	if (us->irqdata[0]) {
		US_DEBUGP("CBI IRQ data showed reserved bType %d\n",
				us->irqdata[0]);
		return USB_STOR_TRANSPORT_ERROR;
	}

	switch (us->irqdata[1] & 0x0F) {
		case 0x00: 
			return USB_STOR_TRANSPORT_GOOD;
		case 0x01: 
			return USB_STOR_TRANSPORT_FAILED;
		default: 
			return USB_STOR_TRANSPORT_ERROR;
	}

	/* we should never get here, but if we do, we're in trouble */
	return USB_STOR_TRANSPORT_ERROR;
}