Exemplo n.º 1
0
/*
 * Drain the global HBA message queue.
 *
 * Marks the queue PROC, then repeatedly pops the head task and hands it
 * to hba_proc_msg() until the task list is empty.  The switch back to
 * MSG_QUEUE_IDLE is made while still holding mv_msg_queue.lock, so a
 * concurrent producer can never observe IDLE with a non-empty queue.
 */
static void mv_proc_queue(void)
{
	struct mv_hba_msg *pmsg;

	/* work on queue non-stop, pre-empty me! */
	queue_state_set(MSG_QUEUE_PROC);

	while (1) {
		MV_DBG(DMSG_HBA, "__MV__ process queue starts.\n");
		spin_lock_irq(&mv_msg_queue.lock);
		if (list_empty(&mv_msg_queue.tasks)) {
			/* it's important we put queue_state_set here. */
			queue_state_set(MSG_QUEUE_IDLE);
			spin_unlock_irq(&mv_msg_queue.lock);
			MV_DBG(DMSG_HBA, "__MV__ process queue ends.\n");
			break;
		}
		/* Peek the head entry, then drop the lock before calling the
		 * (potentially slow) handler.  The message stays on the
		 * tasks list while it is being processed and is only moved
		 * to the free list afterwards. */
		pmsg = list_entry(mv_msg_queue.tasks.next, struct mv_hba_msg, msg_list);
		spin_unlock_irq(&mv_msg_queue.lock);

		hba_proc_msg(pmsg);

		/* clean the pmsg before returning it to free?*/
		pmsg->data = NULL;
		spin_lock_irq(&mv_msg_queue.lock);
		list_move_tail(&pmsg->msg_list, &(mv_msg_queue.free));
		spin_unlock_irq(&mv_msg_queue.lock);
		MV_DBG(DMSG_HBA, "__MV__ process queue ends.\n");
	}

}
Exemplo n.º 2
0
/*
 * Workqueue callback: kick off message-queue processing.
 *
 * Runs mv_proc_queue() only when there is work queued AND no other
 * context is already draining the queue (state is IDLE).  The bare
 * #endif below closes a conditional prototype selection that begins
 * above this excerpt, so it is kept in place.
 */
static void mv_wq_handler(struct work_struct *work)
#endif
{
	if (hba_msg_queue_empty()) {
		MV_DBG(DMSG_KERN,"__MV__  msg queue is empty.\n");
		return;
	}

	/* The queue is known non-empty here; the original code re-tested
	 * !hba_msg_queue_empty() in the else-branch, which was redundant. */
	if (MSG_QUEUE_IDLE == queue_state_get()) {
		MV_DBG(DMSG_KERN,"__MV__  msg queue isn't empty.\n");
		mv_proc_queue();
	}
}
Exemplo n.º 3
0
/*
 * Handle one dequeued HBA event message.
 *
 * pmsg->data carries the owning HBA extension; pmsg->param is the SCSI
 * target id for arrival/removal events, or the hot-plug argument.
 * Messages with no data payload are silently ignored.
 *
 * Fix: MV_ASSERT(pmsg) used to run AFTER pmsg had already been
 * dereferenced, which made the assertion useless; it now comes first.
 */
static void hba_proc_msg(struct mv_hba_msg *pmsg)
{
	PHBA_Extension phba;
	struct scsi_device *psdev;

	/* Assert before any dereference of pmsg. */
	MV_ASSERT(pmsg);

	/* we don't do things without pmsg->data */
	if (NULL == pmsg->data)
		return;

	phba = (PHBA_Extension) pmsg->data;

	MV_DBG(DMSG_HBA, "__MV__ In hba_proc_msg.\n");

	switch (pmsg->msg) {
	case EVENT_DEVICE_ARRIVAL:
		/* scsi_add_device() returns 0 on success. */
		if (scsi_add_device(phba->host, 0, pmsg->param, 0))
			MV_DBG(DMSG_SCSI,
			       "__MV__ add scsi disk %d-%d-%d failed.\n",
			       0, pmsg->param, 0);
		else
			MV_DBG(DMSG_SCSI,
			       "__MV__ add scsi disk %d-%d-%d.\n",
			       0, pmsg->param, 0);
		break;
	case EVENT_DEVICE_REMOVAL:
		/* lookup takes a reference; drop it with scsi_device_put(). */
		psdev = scsi_device_lookup(phba->host, 0, pmsg->param, 0);

		if (NULL != psdev) {
			MV_DBG(DMSG_SCSI,
			       "__MV__ remove scsi disk %d-%d-%d.\n",
			       0, pmsg->param, 0);
			scsi_remove_device(psdev);
			scsi_device_put(psdev);
		} else {
			MV_DBG(DMSG_SCSI,
			       "__MV__ no disk to remove %d-%d-%d\n",
			       0, pmsg->param, 0);
		}
		break;
	case EVENT_HOT_PLUG:
		sata_hotplug(pmsg->data, pmsg->param);
		break;
	default:
		/* Unknown message types are dropped on purpose. */
		break;
	}
}
Exemplo n.º 4
0
/*
 * Sum the memory quota requested by every registered module for the
 * given resource type, rounding each module's request up to 8 bytes.
 *
 * Side effect: for RESOURCE_UNCACHED_MEMORY this does NOT merely size
 * the pool — it also allocates each module's DMA-consistent buffer via
 * pci_alloc_consistent() and records it in pHBA.
 *
 * Returns the total (rounded) quota in bytes.
 */
MV_U32 mod_get_mem_size(PHBA_Extension pHBA, enum Resource_Type type,
			MV_U16 max_io)
{
        int i = 0;
        unsigned long quota = 0;
        unsigned long  oneQuota = 0;

        for (i=0; i<MAX_MODULE_NUMBER; i++) {
                if (module_set[i].get_mem_size != NULL) {
                        oneQuota = module_set[i].get_mem_size(type, max_io);
                        quota += ROUNDING(oneQuota, 8);
                        MV_DBG(DMSG_KERN,
                               "%s quota for module %d is 0x%lx.\n", 
                               type == RESOURCE_CACHED_MEMORY? "Cached memory" : "Uncached memory",
                               i, 
                               oneQuota);

                        if (oneQuota) {
                                if (type == RESOURCE_UNCACHED_MEMORY) {
                                        MV_PVOID uncached_virtual = NULL;
                                        /* NOTE(review): size and physical address are
                                         * recorded BEFORE the success check below, so on
                                         * failure pHBA->uncached_size[i] describes a
                                         * buffer that was never allocated — verify the
                                         * teardown path tolerates this. */
                                        uncached_virtual = pci_alloc_consistent(pHBA->pcidev,
                                                                                                  oneQuota,
                                                                                                  &pHBA->uncached_physical[i]);
                                        pHBA->uncached_size[i] = oneQuota;
                                        if (uncached_virtual != NULL)
                                                pHBA->uncached_virtual_address[i] = uncached_virtual;
#ifdef CACHE_MODULE_SUPPORT
                                        else if (i == MODULE_CACHE) 
                                                MV_DPRINT(("Module %d asks for uncached memory failed.\n", i));
#endif
                                        else
                                                /* NOTE(review): returning -1 from a MV_U32
                                                 * function yields a huge unsigned value;
                                                 * confirm callers test for it explicitly. */
                                                return -1;
                                }
                        }
                }
        }

        /* Each extension needs one extension header which is hidden from module. */
        if ( type==RESOURCE_CACHED_MEMORY )
                quota += MODULE_HEADER_SIZE * MAX_MODULE_NUMBER;

        MV_DBG(DMSG_KERN, "%s quota totally is 0x%lx.\n",
                type==RESOURCE_CACHED_MEMORY? "Cached memory" : "Uncached memory",
                quota);

        return quota;
}
Exemplo n.º 5
0
/*
 * Queue an event message for deferred processing.
 *
 * Takes a slot from the free list, fills it in, appends it to the task
 * list, and wakes the consumer (house-keeper thread or workqueue,
 * depending on SUPPORT_WORKQUEUE).  If the free list is exhausted the
 * event is logged and dropped.
 *
 * Safe from atomic context: uses spin_lock_irqsave and never blocks.
 */
void hba_msg_insert(void *data, unsigned int msg, unsigned int param)
{
	struct mv_hba_msg *pmsg;
	unsigned long flags;

	MV_DBG(DMSG_HBA, "__MV__ msg insert  %d.\n", msg);

	spin_lock_irqsave(&mv_msg_queue.lock, flags);
	if (list_empty(&mv_msg_queue.free)) {
		/* should wreck some havoc ...*/
		MV_DBG(DMSG_HBA, "-- MV -- Message queue is full.\n");
		spin_unlock_irqrestore(&mv_msg_queue.lock, flags);
		return;
	}

	pmsg = list_entry(mv_msg_queue.free.next, struct mv_hba_msg, msg_list);
	pmsg->data = data;
	pmsg->msg  = msg;
	/* Every message type stores param verbatim: the old switch here
	 * had identical branches (a leftover from code that once
	 * dereferenced param) and has been folded into one assignment. */
	pmsg->param = param;

	list_move_tail(&pmsg->msg_list, &mv_msg_queue.tasks);
	spin_unlock_irqrestore(&mv_msg_queue.lock, flags);

#ifndef SUPPORT_WORKQUEUE
	if (house_keeper_task)
		wake_up_process(house_keeper_task);
#else
	schedule_work(&mv_wq);
#endif

}
Exemplo n.º 6
0
/*
 * Reset a tag stack so it holds all `size` tags.
 *
 * After this call Top == size and the stack contains size-1 .. 0 from
 * bottom to top, so tag 0 is the first one handed out.
 */
MV_VOID Tag_Init( PTag_Stack pTagStack, MV_U8 size )
{
	MV_U8 idx;

	MV_DASSERT( size <= MAX_TAG_NUMBER );
#ifdef _OS_LINUX
	/* Re-initialising a pool that still has tags outstanding is
	 * suspicious; log it on Linux builds. */
	if ( pTagStack->Top && pTagStack->Top < size )
		MV_DBG(DMSG_CORE, "__MV__ Init an in-use tag pool "
		       "curr_size:init_size - %d:%d.\n", pTagStack->Top, size);
#endif /* _OS_LINUX */

	pTagStack->Top = size;
	for ( idx = 0; idx < size; idx++ )
		pTagStack->Stack[idx] = (MV_U8)(size - 1 - idx);
}
Exemplo n.º 7
0
/*
 * Allocate and initialise the per-controller HBA extension.
 *
 * Steps:
 *   1. Size and vmalloc() one cached block covering every module's
 *      extension plus the hidden per-module headers.
 *   2. Record PCI identity and map each BAR (ioremap for MMIO BARs,
 *      raw port address otherwise).
 *   3. Allocate each module's DMA-consistent (uncached) region and
 *      record its size/virtual/physical address in Module_Manage.
 *
 * Returns the HBA extension pointer, or NULL on failure (all partial
 * allocations are released via the goto-cleanup labels).
 */
void *mv_hba_init_ext(struct pci_dev *dev)
{
	int i;

	PModule_Header pheader;
	PHBA_Extension phba;
	PModule_Manage pmod;

	unsigned long total_size = 0;
	unsigned long size = 0;

	unsigned long addr;
	unsigned long range;

	dma_addr_t    dma_addr;
	BUS_ADDRESS   bus_addr;
	MV_PHYSICAL_ADDR phy_addr;
	

	/* allocate normal (CACHED) mem */
	for (i=0; i<MAX_MODULE_NUMBER; i++) {
		size = module_set[i].get_mem_size(RESOURCE_CACHED_MEMORY,
						  MAX_REQUEST_NUMBER);

		if ( 0 != size )
			total_size += ROUNDING(size, 8);
		
		/* Modules are expected to return 8-byte-aligned sizes. */
		WARN_ON(size != ROUNDING(size, 8));
		
	}

	/* init hba ext structure */
	total_size += ROUNDING(MODULE_HEADER_SIZE * MAX_MODULE_NUMBER, 8);

	MV_DBG(DMSG_HBA, "THOR : Memory quota is 0x%lx bytes.\n",
	       total_size);

	pheader = (PModule_Header) vmalloc(total_size);
	if ( NULL == pheader )
		return NULL;

	memset(pheader, 0, total_size);
	Module_AssignModuleExtension(pheader, MAX_REQUEST_NUMBER);
	
	phba = (PHBA_Extension) head_to_hba(pheader);
	phba->host_data = pheader;
	phba->pcidev    = dev;
	phba->Vendor_Id = dev->vendor;
	phba->Device_Id = dev->device;

	/* map pci resource */
	if (pci_read_config_byte(dev, PCI_REVISION_ID, &phba->Revision_Id)) {
		printk("THOR : Failed to get hba's revision id.\n");
		goto ext_err_mem;
	}
	
	for (i=0; i<MAX_BASE_ADDRESS; i++) {
		addr  = pci_resource_start(dev, i);
		range = pci_resource_len(dev, i);

		/* MMIO BARs are ioremap'ed; I/O-port BARs keep the raw
		 * bus address.  NOTE(review): ioremap failure (NULL) is
		 * not checked here — confirm downstream users tolerate a
		 * NULL Base_Address entry. */
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
			phba->Base_Address[i] =(MV_PVOID) ioremap(addr, range);
		else
			phba->Base_Address[i] =(MV_PVOID) addr;

		MV_DBG(DMSG_HBA, "THOR : BAR %d : %p.\n", i, 
		       phba->Base_Address[i]);
	}
	
	/* allocate consistent dma memory (uncached) */
	size = 0;
	total_size = 0;
	pmod = &phba->Module_Manage;
	
	for (i=0; i<MAX_MODULE_NUMBER; i++) {
	MV_DBG(DMSG_HBA, "THOR : i = %d module_set[i].get_mem_size %p\n", i,
	       module_set[i].get_mem_size);
	
		size = module_set[i].get_mem_size(RESOURCE_UNCACHED_MEMORY,
						  MAX_REQUEST_NUMBER);
		if (0 == size) 
			continue;
		WARN_ON(size != ROUNDING(size, 8));

		size = ROUNDING(size, 8);
		pmod->resource[i].uncached_address = (MV_PVOID) \
			pci_alloc_consistent(dev, size, &dma_addr);

		if ( NULL == pmod->resource[i].uncached_address )
			goto ext_err_dma;
		
		/* uncached_size is set only after a successful allocation,
		 * so the cleanup loop below frees exactly what was
		 * allocated. */
		pmod->resource[i].uncached_size = size;
		bus_addr = (BUS_ADDRESS) dma_addr;
		phy_addr.low  = LO_BUSADDR(bus_addr);
		phy_addr.high = HI_BUSADDR(bus_addr);
		pmod->resource[i].uncached_physical_address = phy_addr;
		
	}

	MV_DBG(DMSG_HBA, "THOR : HBA ext struct init'ed at %p.\n", phba);
	return phba;

ext_err_dma:
	/* Free every DMA region recorded so far (uncached_size != 0). */
	for (i=0; i<MAX_MODULE_NUMBER; i++) {
		if ( pmod->resource[i].uncached_size ) {
			phy_addr = pmod->resource[i].uncached_physical_address;
			dma_addr = (dma_addr_t) ( phy_addr.low | \
						  ((u64) phy_addr.high)<<32 );
			pci_free_consistent(dev, 
					    pmod->resource[i].uncached_size,
					    pmod->resource[i].uncached_address,
					    dma_addr);
		}
	}
ext_err_mem:
	vfree(pheader);
	return NULL;
}