/* Thin OSDEP wrapper: initialise a semaphore to the requested count. */
void _rtw_init_sema(struct semaphore *sema, int init_val)
{
	sema_init(sema, init_val);
}
/* Initialise the global rk-nand operations lock as a binary semaphore. */
void rknand_device_lock_init(void)
{
	sema_init(&g_rk_nand_ops_mutex, 1);
}
static int mephisto_probe(struct usb_interface *interface, const struct usb_device_id *id) { int err = ME_ERRNO_SUCCESS; mephisto_usb_device_t* dev; /// The usb device. me_device_t* n_device = NULL; me_device_t* o_device = NULL; long unsigned int serial_no; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) char* tmp; #endif PDEBUG("executed.\n"); /// Allocate structures. dev = kzalloc(sizeof(mephisto_usb_device_t), GFP_KERNEL); if (!dev) { PERROR_CRITICAL("Can't get memory for device's instance.\n"); err = -ENOMEM; goto ERROR_0; } /// Initialize USB lock. dev->usb_semaphore = kzalloc(sizeof(struct semaphore), GFP_KERNEL); if (!dev->usb_semaphore) { PERROR_CRITICAL("Can't get memory for usb lock.\n"); err = -ENOMEM; goto ERROR_1; } #ifndef init_MUTEX sema_init(dev->usb_semaphore, 1); #else init_MUTEX(dev->usb_semaphore); #endif /// Initialize variables. dev->dev = usb_get_dev(interface_to_usbdev(interface)); if(!dev->dev) { PERROR("Error while request for usb device.\n"); err = -ENODEV; goto ERROR_2; } /// Initialize hardware usb_set_intfdata(interface, dev); /// Read serial number #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) tmp = (dev->dev->serial + strlen(dev->dev->serial)); serial_no = simple_strtoul(dev->dev->serial + 2, &tmp, 16); #else if (strict_strtoul(dev->dev->serial + 2, 16, &serial_no)) { serial_no = 0; } #endif dev->serial_no = serial_no; /// Hardware init mephisto_endpoints_reset(dev); /** Choice: a) New device connected. Add to device list. b) Old device reconected. Refresh device structure. */ o_device = find_device_on_list(dev, ME_PLUGGED_ANY); if(o_device) { PDEBUG("Old device.\n"); // Old device. if (o_device->bus.plugged == ME_PLUGGED_IN) { // Error device is already on list mark as active! 
PERROR("Device is already on list mark as active!\n"); o_device->me_device_disconnect(o_device); } } else { PDEBUG("New device.\n"); } PINFO("CALLING %s constructor\n", "mephisto_constr"); n_device = mephisto_constr(dev, o_device); if (!n_device) { PERROR("Executing '%s()' failed.\n", "mephisto_constr"); err = -ENODEV; goto ERROR_3; } else if (!o_device) { PINFO("Adding new entry to device list.\n"); insert_to_device_list(n_device); } if (n_device->me_device_postinit) { if (n_device->me_device_postinit(n_device, NULL)) { PERROR("Error while calling me_device_postinit().\n"); /// This error can be ignored. } else { PDEBUG("me_device_postinit() was sucessful.\n"); } } else { PERROR("me_device_postinit() not registred!\n"); } return 0; ERROR_3: usb_put_dev(interface_to_usbdev(interface)); ERROR_2: kfree(dev->usb_semaphore); ERROR_1: kfree(dev); ERROR_0: return err; }
/*
 * tty0tty_open() - tty open() hook for one end of a virtual null-modem pair.
 *
 * Lazily allocates the per-port tty0tty_serial state on first open, mirrors
 * the peer port's modem-control bits into this port's modem-status register
 * (null-modem cross-wiring: peer RTS -> our CTS, peer DTR -> our DSR + CD),
 * then records the tty and bumps the open count under the port semaphore.
 *
 * Returns 0 on success, -ENOMEM if the state allocation fails.
 */
static int tty0tty_open(struct tty_struct *tty, struct file *file)
{
	struct tty0tty_serial *tty0tty;
	int index;
	int msr = 0;
	int mcr = 0;

#ifdef SCULL_DEBUG
	printk(KERN_DEBUG "%s - \n", __FUNCTION__);
#endif
	/* initialize the pointer in case something fails */
	tty->driver_data = NULL;

	/* get the serial object associated with this tty pointer */
	index = tty->index;
	tty0tty = tty0tty_table[index];
	if (tty0tty == NULL) {
		/* first time accessing this device, let's create it */
		tty0tty = kmalloc(sizeof(*tty0tty), GFP_KERNEL);
		if (!tty0tty)
			return -ENOMEM;

		sema_init(&tty0tty->sem, 1);
		tty0tty->open_count = 0;

		tty0tty_table[index] = tty0tty;
	}

	tport[index].tty = tty;
	tty->port = &tport[index];

	/* Ports are paired even<->odd; pick up the peer's MCR only if the
	 * peer exists and is currently open.
	 * NOTE(review): the peer's table entry is read without holding the
	 * peer's semaphore - confirm this race is acceptable. */
	if ((index % 2) == 0) {
		if (tty0tty_table[index + 1] != NULL)
			if (tty0tty_table[index + 1]->open_count > 0)
				mcr = tty0tty_table[index + 1]->mcr;
	} else {
		if (tty0tty_table[index - 1] != NULL)
			if (tty0tty_table[index - 1]->open_count > 0)
				mcr = tty0tty_table[index - 1]->mcr;
	}

	//null modem connection
	if ((mcr & MCR_RTS) == MCR_RTS) {
		msr |= MSR_CTS;
	}
	if ((mcr & MCR_DTR) == MCR_DTR) {
		msr |= MSR_DSR;
		msr |= MSR_CD;
	}
	tty0tty->msr = msr;
	tty0tty->mcr = 0;

	/* register the tty driver */
	down(&tty0tty->sem);

	/* save our structure within the tty structure */
	tty->driver_data = tty0tty;
	tty0tty->tty = tty;

	++tty0tty->open_count;
	up(&tty0tty->sem);
	return 0;
}
/******************************************************************************* * 函 数 名 : cshell_init * * 功能描述 : cshell初始化接口 * * 输入参数 : 无 * * 输出参数 : 无 * * 返 回 值 : CSHELL_ERROR-初始化失败,CSHELL_OK-初始化成功 * *******************************************************************************/ int cshell_init(void) { cshell_ctx_t *cshell_ctx = &g_cshell_ctx; DRV_UART_SHELL_FLAG uartcshell_nv = {.extendedbits = 0}; u32 channel_id = 0; printk("A:cshell_init start \n"); cshell_ctx->ccshell_work_flg = 1; cshell_ctx->cshell_acm_fd = 0; cshell_ctx->usb_send_buf = (u8*)kmalloc(CSHELL_BUFFER_SIZE, GFP_KERNEL); if(!cshell_ctx->usb_send_buf) { printk("[ACSHELL]kmalloc fails line:%d", __LINE__); goto error; } cshell_ctx->usb_send_buf_size = CSHELL_BUFFER_SIZE; cshell_ctx->usb_send_buf_r = 0; cshell_ctx->usb_send_buf_w = 0; cshell_icc_init(); sema_init(&(cshell_ctx->cshell_send_sem), 0); sema_init(&(cshell_ctx->cshell_recv_sem), 0); sema_init(&(cshell_ctx->cshell_usb_send_sem), 0); spin_lock_init(&g_cshell_ctx.cshell_spin_loc_permit); if(bsp_nvm_read(NV_ID_DRV_UART_SHELL_FLAG, (u8 *)&uartcshell_nv, sizeof(DRV_UART_SHELL_FLAG))) { printk("[A:CSHELL]:bsp_nvm_read fails line:%d uartcshell_nv.cshell_to_auart:%d\n", __LINE__, uartcshell_nv.extendedbits); } if(uartcshell_nv.extendedbits & 0x1U) { cshell_set_bit(AUART_CSHELL); }else{ cshell_clear_bit(AUART_CSHELL); } printk("A :icc channel[%d] open sucess \n", ICC_CHN_CSHELL); channel_id = cshell_ctx->icc_channel_id << 16; if(bsp_icc_event_register(channel_id ,cshell_read_cb, NULL, NULL, NULL)) printk(KERN_ERR "A:CSHELL bsp_icc_event_register fail"); cshell_ctx->send_task = kthread_run(cshell_send_thread, NULL, "cshell_send_thread"); if (IS_ERR(cshell_ctx->send_task)) { printk("A:cshell_init send thread create fail\n"); goto error; } cshell_ctx->recv_task = kthread_run(cshell_recv_thread, NULL, "cshell_recv_thread"); if (IS_ERR(cshell_ctx->recv_task)) { printk("A:cshell_init recv thread create fail\n"); goto error; } cshell_ctx->send_task_usb = 
kthread_run(cshell_usb_send_thread, NULL, "cshell_usb_send_thread"); if (IS_ERR(cshell_ctx->recv_task)) { printk("[ACSHELL] fail to create cshell_usb_send_thread\n"); goto error; } printk("A:cshell_init udi cb register\n"); /* 注册USB插入回调函数*/ bsp_usb_register_enablecb(cshell_udi_open_cb); bsp_usb_register_disablecb(cshell_udi_close_cb); cshell_ctx->valid = 1; printk("A:cshell_init ok\n"); return CSHELL_OK; error: cshell_uninit(); printk("A:cshell_init fail...\n"); return CSHELL_ERROR; } //#ifndef OS_ANDROID_USE_K3V3_KERNEL module_init(cshell_init); //#endif #ifdef __cplusplus }
/*
 * _r8712_init_xmit_priv() - set up transmit bookkeeping for an adapter.
 *
 * Zeroes the xmit_priv, initialises its lock/semaphores and the per-AC
 * pending queues, carves NR_XMITFRAME xmit_frame entries and NR_XMITBUFF
 * xmit_buf entries out of 4-byte-aligned allocations, links them onto the
 * free queues, initialises the hardware tx queues and the xmit tasklet.
 *
 * Returns _SUCCESS, or _FAIL on any allocation failure.
 *
 * NOTE(review): on the later _FAIL paths the earlier allocations
 * (pallocated_frame_buf, pallocated_xmitbuf, per-buffer pallocated_buf)
 * are not freed here - confirm the caller releases them via the matching
 * free routine when this returns _FAIL.
 */
sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, struct _adapter *padapter)
{
	sint i;
	struct xmit_buf *pxmitbuf;
	struct xmit_frame *pxframe;

	memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
	spin_lock_init(&pxmitpriv->lock);
	sema_init(&pxmitpriv->xmit_sema, 0);
	sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
	/*
	Please insert all the queue initializaiton using _init_queue below
	*/
	pxmitpriv->adapter = padapter;
	_init_queue(&pxmitpriv->be_pending);
	_init_queue(&pxmitpriv->bk_pending);
	_init_queue(&pxmitpriv->vi_pending);
	_init_queue(&pxmitpriv->vo_pending);
	_init_queue(&pxmitpriv->bm_pending);
	_init_queue(&pxmitpriv->legacy_dz_queue);
	_init_queue(&pxmitpriv->apsd_queue);
	_init_queue(&pxmitpriv->free_xmit_queue);
	/*
	Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
	and initialize free_xmit_frame below.
	Please also apply free_txobj to link_up all the xmit_frames...
	*/
	pxmitpriv->pallocated_frame_buf = _malloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
	if (pxmitpriv->pallocated_frame_buf == NULL) {
		pxmitpriv->pxmit_frame_buf = NULL;
		return _FAIL;
	}
	/* Round the usable frame array start up to a 4-byte boundary
	 * (the +4 in the allocation above pays for this shift). */
	pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 -
			((addr_t) (pxmitpriv->pallocated_frame_buf) & 3);
	pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
	for (i = 0; i < NR_XMITFRAME; i++) {
		_init_listhead(&(pxframe->list));
		pxframe->padapter = padapter;
		pxframe->frame_tag = DATA_FRAMETAG;
		pxframe->pkt = NULL;
		pxframe->buf_addr = NULL;
		pxframe->pxmitbuf = NULL;
		list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
		pxframe++;
	}
	pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
	/* init xmit hw_txqueue */
	_r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->vo_txqueue, VO_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX);
	pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
	pxmitpriv->txirp_cnt = 1;
	sema_init(&(pxmitpriv->tx_retevt), 0);
	/*per AC pending irp*/
	pxmitpriv->beq_cnt = 0;
	pxmitpriv->bkq_cnt = 0;
	pxmitpriv->viq_cnt = 0;
	pxmitpriv->voq_cnt = 0;
	/*init xmit_buf*/
	_init_queue(&pxmitpriv->free_xmitbuf_queue);
	_init_queue(&pxmitpriv->pending_xmitbuf_queue);
	pxmitpriv->pallocated_xmitbuf = _malloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
	if (pxmitpriv->pallocated_xmitbuf == NULL)
		return _FAIL;
	pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
			((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
	for (i = 0; i < NR_XMITBUFF; i++) {
		_init_listhead(&pxmitbuf->list);
		pxmitbuf->pallocated_buf = _malloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ);
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;
		/* Align the DMA-visible buffer to XMITBUF_ALIGN_SZ. */
		pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
			((addr_t) (pxmitbuf->pallocated_buf) & (XMITBUF_ALIGN_SZ - 1));
		r8712_xmit_resource_alloc(padapter, pxmitbuf);
		list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
		pxmitbuf++;
	}
	pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
	alloc_hwxmits(padapter);
	init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	tasklet_init(&pxmitpriv->xmit_tasklet,
		(void(*)(addr_t))r8712_xmit_bh,
		(addr_t)padapter);
	return _SUCCESS;
}
/*
 * load_exeso_binary() - binfmt load handler for "exeso" (Windows-on-Linux)
 * executables.
 *
 * Validates the ELF header, flushes the old executable image, lays out the
 * Win32-style address space (reserved low/high regions), maps ntdll.dll.so
 * when NTDLL_SO is configured, then builds the NT-style kernel objects
 * (EPROCESS, ETHREAD, PEB/PPB, TEB) before transferring control to the
 * interpreter entry point via start_thread().
 *
 * Returns 0 on success or a negative errno; error labels unwind the NT
 * objects in reverse order of creation and SIGKILL the task on early
 * failures (out_free_file), matching binfmt loader convention after
 * flush_old_exec() has destroyed the old image.
 */
static int load_exeso_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct elfhdr *elf_ex;
	struct elf_phdr *elf_phdata = NULL;
	struct mm_struct *mm;
	unsigned long load_addr = 0;
	unsigned long error;
	int retval = 0;
	unsigned long pe_entry, ntdll_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long ntdll_entry;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	unsigned long stack_top;
#ifdef NTDLL_SO
	unsigned long interp_load_addr;
	unsigned long interp_entry;
#endif
	struct eprocess *process;
	struct ethread *thread;
	PRTL_USER_PROCESS_PARAMETERS ppb;
	OBJECT_ATTRIBUTES ObjectAttributes;
	INITIAL_TEB init_teb;
	BOOLEAN is_win32=FALSE;
	struct startup_info *info=NULL;
	struct eprocess *parent_eprocess=NULL;
	struct ethread *parent_ethread=NULL;
	struct w32process* child_w32process =NULL;
	struct w32process* parent_w32process =NULL;

	elf_ex = (struct elfhdr *)bprm->buf;
	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;
	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (elf_ex->e_phnum < 1 || elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	/* exeso-specific magic check; reject anything else */
	if(!check_exeso(bprm))
		goto out;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* A Windows parent means we must synchronise with its startup_info
	 * handshake further below. */
	if(current->parent->ethread) {
		is_win32 = TRUE;
		parent_ethread = current->parent->ethread;
		parent_eprocess = parent_ethread->threads_process;
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval) {
		goto out;
	}

	/* OK, This is the point of no return */
	mm = current->mm;
	current->flags &= ~PF_FORKNOEXEC;
	mm->def_flags = def_flags;

	/* Fixed-size stack, Windows style. */
	current->signal->rlim[RLIMIT_STACK].rlim_cur = WIN32_STACK_LIMIT;
	current->signal->rlim[RLIMIT_STACK].rlim_max = WIN32_STACK_LIMIT;
	current->personality |= ADDR_COMPAT_LAYOUT;
	arch_pick_mmap_layout(mm);

	/* Do this so that we can load the ntdll, if need be.  We will
	   change some of these later */
	mm->free_area_cache = mm->mmap_base = WIN32_UNMAPPED_BASE;
	mm->cached_hole_size = 0;
	stack_top = WIN32_STACK_LIMIT + WIN32_LOWEST_ADDR;
	retval = setup_arg_pages(bprm, stack_top, executable_stack);
	if (retval < 0)
		goto out_free_file;

	down_write(&mm->mmap_sem);
	/* reserve first 0x100000 */
	do_mmap_pgoff(NULL, 0, WIN32_LOWEST_ADDR, PROT_NONE,
			MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
	/* reserve first 0x7fff0000 - 0x80000000 */
	do_mmap_pgoff(NULL, WIN32_TASK_SIZE - 0x10000, 0x10000,
			PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
	/* reserve first 0x81000000 - 0xc0000000
	 * 0x80000000 - 0x81000000 used for wine SYSTEM_HEAP */
	do_mmap_pgoff(NULL, WIN32_TASK_SIZE + WIN32_SYSTEM_HEAP_SIZE,
			TASK_SIZE - WIN32_TASK_SIZE - WIN32_SYSTEM_HEAP_SIZE,
			PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
	up_write(&mm->mmap_sem);

#ifdef NTDLL_SO
	/* search ntdll.dll.so in $PATH, default is /usr/local/lib/wine/ntdll.dll.so */
	if (!*ntdll_name)
		search_ntdll();

	/* map ntdll.dll.so */
	map_system_dll(current, ntdll_name, &ntdll_load_addr, &interp_load_addr);

	pe_entry = get_pe_entry();
	ntdll_entry = get_ntdll_entry();
	interp_entry = get_interp_entry();
#endif

	set_binfmt(&exeso_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		goto out_free_file;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	install_exec_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;

#ifdef NTDLL_SO
	/* copy argv, env, and auxvec to stack, all for interpreter */
	create_elf_tables_aux(bprm,
			ntdll_load_addr, ntdll_phoff, ntdll_phnum, get_ntdll_start_thunk(),
			load_addr, elf_ex->e_phoff, elf_ex->e_phnum, 0,
			interp_load_addr, interp_entry, 0);
#endif

	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh. */
		down_write(&mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&mm->mmap_sem);
	}

	/* create win-related structure */
	INIT_OBJECT_ATTR(&ObjectAttributes, NULL, 0, NULL, NULL);

	/* Create EPROCESS */
	retval = create_object(KernelMode,
			process_object_type,
			&ObjectAttributes,
			KernelMode,
			NULL,
			sizeof(struct eprocess),
			0,
			0,
			(PVOID *)&process);
	if (retval != STATUS_SUCCESS) {
		goto out_free_file;
	}

	/* init eprocess */
	eprocess_init(NULL, FALSE, process);
	process->unique_processid = create_cid_handle(process, process_object_type);
	if (!process->unique_processid)
		goto out_free_eproc;

	/* initialize EProcess and KProcess */
	process->section_base_address = (void *)load_addr;
	/* FIXME: PsCreateCidHandle */

	/* Create PEB */
	if ((retval = create_peb(process)))
		goto out_free_process_cid;

	/* Create PPB - only when launched from a non-Windows parent;
	 * a Windows parent supplies the parameters itself. */
	if(is_win32 == FALSE) {
		create_ppb(&ppb, process, bprm, bprm->filename,
				NULL, NULL, NULL, NULL, NULL, NULL, NULL);
		((PEB *)process->peb)->ProcessParameters = ppb;
	}

	/* allocate a Win32 thread object */
	retval = create_object(KernelMode,
			thread_object_type,
			&ObjectAttributes,
			KernelMode,
			NULL,
			sizeof(struct ethread),
			0,
			0,
			(PVOID *)&thread);
	if (retval) {
		goto out_free_process_cid;
	}

	thread->cid.unique_thread = create_cid_handle(thread, thread_object_type);
	thread->cid.unique_process = process->unique_processid;
	if (!thread->cid.unique_thread)
		goto out_free_ethread;

	/* set the teb */
	init_teb.StackBase = (PVOID)(bprm->p);
	init_teb.StackLimit = (PVOID)WIN32_LOWEST_ADDR + PAGE_SIZE;
	thread->tcb.teb = create_teb(process, (PCLIENT_ID)&thread->cid, &init_teb);
	if (IS_ERR(thread->tcb.teb)) {
		retval = PTR_ERR(thread->tcb.teb);
		goto out_free_thread_cid;
	}

	/* Init KThreaad */
	ethread_init(thread, process, current);

	sema_init(&thread->exec_semaphore,0);
	if (is_win32 == TRUE) //parent is a windows process
	{
		down(&thread->exec_semaphore);  //wait for the parent
		child_w32process = process->win32process;
		parent_w32process = parent_eprocess->win32process;
		info = child_w32process->startup_info;

		//now parent has finished its work
		if(thread->inherit_all)
		{
			create_handle_table(parent_eprocess, TRUE, process);
			child_w32process = create_w32process(parent_w32process, TRUE, process);
		}
	}

	deref_object(process);
	deref_object(thread);

	set_teb_selector(current, (long)thread->tcb.teb);

	thread->start_address = (void *)pe_entry;	/* FIXME */

	/* save current trap frame */
	thread->tcb.trap_frame = (struct ktrap_frame *)regs;

	/* init apc, to call LdrInitializeThunk */
#if 0
	thread_apc = kmalloc(sizeof(KAPC), GFP_KERNEL);
	if (!thread_apc) {
		retval = -ENOMEM;
		goto out_free_thread_cid;
	}
	apc_init(thread_apc,
			&thread->tcb,
			OriginalApcEnvironment,
			thread_special_apc,
			NULL,
			(PKNORMAL_ROUTINE)ntdll_entry,
			UserMode,
			(void *)(bprm->p + 12));
	insert_queue_apc(thread_apc, (void *)interp_entry, (void *)extra_page, IO_NO_INCREMENT);
#ifndef TIF_APC
#define	TIF_APC	13
#endif
	set_tsk_thread_flag(current, TIF_APC);
#endif

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, interp_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}

	retval = 0;

	try_module_get(THIS_MODULE);

	/* return from w32syscall_exit, not syscall_exit */
	((unsigned long *)regs)[-1] = (unsigned long)w32syscall_exit;
	regs->fs = TEB_SELECTOR;

out:
	if(elf_phdata)
		kfree(elf_phdata);
	return retval;

	/* error cleanup */
out_free_thread_cid:
	delete_cid_handle(thread->cid.unique_thread, thread_object_type);
out_free_ethread:
	deref_object(thread);
out_free_process_cid:
	delete_cid_handle(process->unique_processid, process_object_type);
out_free_eproc:
	deref_object(process);
out_free_file:
	send_sig(SIGKILL, current, 0);
	goto out;
}
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct cc_device
 *
 * Returns:	0             if a new adapter was successfully enqueued
 *		-ENOMEM       if alloc failed
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 * locks:	config_sema must be held to serialise changes to the adapter list
 */
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	/*
	 * Note: It is safe to release the list_lock, as any list changes
	 * are protected by the config_sema, which must be held to get here
	 */

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	/* well-known-address (WKA) port table */
	adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
	if (!adapter->gs) {
		kfree(adapter);
		return -ENOMEM;
	}

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;
	atomic_set(&adapter->refcount, 0);

	/* QDIO queues, low-memory emergency pools, and the request list
	 * must exist before the debug facilities are registered */
	if (zfcp_qdio_allocate(adapter))
		goto qdio_allocate_failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_reqlist_alloc(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_adapter_debug_register(adapter))
		goto debug_register_failed;

	init_waitqueue_head(&adapter->remove_wq);
	init_waitqueue_head(&adapter->erp_thread_wqh);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->port_list_head);
	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	spin_lock_init(&adapter->req_list_lock);
	spin_lock_init(&adapter->hba_dbf_lock);
	spin_lock_init(&adapter->san_dbf_lock);
	spin_lock_init(&adapter->scsi_dbf_lock);
	spin_lock_init(&adapter->rec_dbf_lock);
	spin_lock_init(&adapter->req_q_lock);
	spin_lock_init(&adapter->qdio_stat_lock);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	/* ERP thread blocks on this until work is ready */
	sema_init(&adapter->erp_ready_sem, 0);

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);

	adapter->service_level.seq_print = zfcp_print_sl;

	/* mark adapter unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto sysfs_failed;

	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	zfcp_fc_wka_ports_init(adapter);

	/* zfcp_adapter_scsi_register() returns 0 on success; any failure
	 * falls through into the unwind chain below */
	if (!zfcp_adapter_scsi_register(adapter))
		return 0;

sysfs_failed:
	zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
	dev_set_drvdata(&ccw_device->dev, NULL);
	kfree(adapter->req_list);
failed_low_mem_buffers:
	zfcp_free_low_mem_buffers(adapter);
qdio_allocate_failed:
	zfcp_qdio_free(adapter);
	kfree(adapter);
	return -ENOMEM;
}
static int pn544_probe( struct i2c_client *client, const struct i2c_device_id *id) { int ret; struct pn544_i2c_platform_data *pdata; if (pn544_dev != NULL) { printk(KERN_ERR "pn544_probe: multiple devices NOT supported\n"); ret = -ENODEV; goto err_single_device; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { printk(KERN_ERR "pn544_probe: need I2C_FUNC_I2C\n"); ret = -ENODEV; goto err_check_functionality_failed; } pn544_dev = kzalloc(sizeof(*pn544_dev), GFP_KERNEL); if (pn544_dev == NULL) { printk(KERN_ERR "pn544_probe: out of memory\n"); ret = -ENOMEM; goto err_alloc_data_failed; } pn544_dev->client = client; pdata = client->dev.platform_data; if (pdata) { pn544_dev->irq_gpio=pdata->irq_gpio; pn544_dev->firm_gpio=pdata->firm_gpio; pn544_dev->ven_gpio=pdata->ven_gpio; pn544_dev->clock_gpio=pdata->clock_gpio; pn544_dev->dcdc_gpio=pdata->dcdc_gpio; pn544_dev->int_active_low=pdata->int_active_low; printk( "pn544_probe: gpio config data,irq=%d,download=%d,ven=%d,clock=%d,dcdc=%d,int_active_low=%d\n",pn544_dev->irq_gpio,pn544_dev->firm_gpio,pn544_dev->ven_gpio,pn544_dev->clock_gpio,pn544_dev->dcdc_gpio,pn544_dev->int_active_low); } else { printk(KERN_ERR "pn544_probe: no gpio config data\n"); ret = -ENODEV; goto err_alloc_data_failed; } /* init semaphore and queues */ sema_init(&pn544_dev->sem, 1); init_waitqueue_head(&pn544_dev->read_queue); /* register this device with the driver core */ /*3. slf note 20110328--->create: sys/class/nfc-dev/pn544 , 注册设备节点/dev/pn544*/ pn544_dev->dev = device_create(pn544_dev_class, &client->dev, MKDEV(pn544_major, pn544_minor), NULL, DEVNAME); if (IS_ERR(pn544_dev->dev)) { printk(KERN_ERR "pn544_probe: device_create() failed\n"); ret = PTR_ERR(pn544_dev->dev); goto err_device_create_failed; } /*4. 
slf note 20110328--->create: sys/class/nfc-dev/pn544/name */ ret = device_create_file(pn544_dev->dev, &dev_attr_name); if (ret) { goto err_device_create_file_failed; } ret =nxp_pn544_reset(); if (ret < 0) { printk(KERN_ERR "pn544: can't reset device\n"); goto err_device_create_file_failed; } /* set irq/polling mode */ if (client->irq && !pn544_disable_irq) { if(pn544_dev->int_active_low==1) { ret = request_irq(client->irq, pn544_dev_irq_handler, IRQF_TRIGGER_FALLING, client->name, pn544_dev); } else { ret = request_irq(client->irq, pn544_dev_irq_handler, IRQF_TRIGGER_RISING, client->name, pn544_dev); } if (ret == 0) { pn544_dev->use_irq = 1; } else { dev_err(&client->dev, "request_irq failed\n"); } } if (!pn544_dev->use_irq) { hrtimer_init(&pn544_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pn544_dev->timer.function = pn544_dev_timer_handler; hrtimer_start(&pn544_dev->timer, ktime_set(0, pn544_poll_value), HRTIMER_MODE_REL); } printk(KERN_INFO "pn544_probe: Start in %s mode,IRQ=%d,GPIO=%d\n", pn544_dev->use_irq ? "interrupt" : "polling",client->irq,INT_TO_MSM_GPIO(client->irq)); return 0; err_device_create_file_failed: device_destroy(pn544_dev_class, MKDEV(pn544_major, pn544_minor)); err_device_create_failed: kfree(pn544_dev); pn544_dev = NULL; err_alloc_data_failed: err_check_functionality_failed: err_single_device: return ret; }
/*
 * urbtc_probe() - USB probe for the URBTC robot controller.
 *
 * Allocates the per-device state, walks the interface's endpoint
 * descriptors and sets up the bulk-IN read-request/read buffers and the
 * bulk-OUT counter-command buffer, then registers a minor with the USB
 * core. On failure, everything allocated so far is released.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error path kfree()s the buffers and then drops the
 * kref, which invokes urbtc_delete() - confirm urbtc_delete() does not
 * free the same buffers again (potential double free).
 */
static int urbtc_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_urbtc *dev = NULL;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		//err("Out of memory");
		goto error;
	}
	memset(dev, 0, sizeof(*dev));

	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	//init_MUTEX(&dev->readbuf_sem);
	sema_init(&dev->readbuf_sem,1); //chg t.miyo
	init_waitqueue_head(&dev->readbuf_wait);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;
	dev->readreq_buffer = NULL;
	dev->readbuf_urb = NULL;
	dev->readbuf_work = NULL;
	dev->readbuf_buffered = NULL;
	dev->readbuf_last_read = 0;
	dev->readbuf_last_buffered = 0;

	/* Walk the endpoints of the current altsetting and claim the ones
	 * this driver uses, identified by endpoint number. */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

/* 		dbg("endpoint %d(%d) %s", */
/* 		    endpoint->bEndpointAddress, */
/* 		    endpoint->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK, */
/* 		    endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK? "in": "out"); */

		/* bulk-IN endpoints */
		if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
		    ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) {
			switch (endpoint->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK) {
			case EP_READREQ:
				buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
				dev->readreq_size = buffer_size;
				dev->readreq_buffer = kmalloc(buffer_size, GFP_KERNEL);
				if (!dev->readreq_buffer) {
					//err("Could not allocate readreq_buffer");
					goto error;
				}
				break;
			case EP_READ:
				/* double-buffered asynchronous read: 'work' is
				 * filled by the URB, 'buffered' holds the last
				 * complete sample */
				buffer_size = sizeof(struct uin);
				dev->readbuf_urb = usb_alloc_urb(0, GFP_KERNEL);
				if (!dev->readbuf_urb) {
					//err("Could not allocate readbuf_urb");
					goto error;
				}
				dev->readbuf_size = buffer_size;
				dev->readbuf_work = kmalloc(buffer_size, GFP_KERNEL);
				if (!dev->readbuf_work) {
					//err("Could not allocate readbuf_work");
					goto error;
				}
				dev->readbuf_buffered = kmalloc(buffer_size, GFP_KERNEL);
				if (!dev->readbuf_buffered) {
					//err("Could not allocate readbuf_buffer");
					goto error;
				}
				usb_fill_bulk_urb(dev->readbuf_urb, dev->udev,
						  usb_rcvbulkpipe(dev->udev, endpoint->bEndpointAddress),
						  dev->readbuf_work, dev->readbuf_size,
						  urbtc_read_bulk_callback, dev);
				break;
			default:
				break;
			}
		}

		/* bulk-OUT endpoints */
		if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) &&
		    ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) {
			switch (endpoint->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK) {
			case EP_CCMD:
				buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
				dev->write_counter_size = buffer_size;
				dev->write_counter_buffer = kmalloc(buffer_size, GFP_KERNEL);
				if (!dev->write_counter_buffer) {
					//err("Could not allocate write_counter_buffer");
					goto error;
				}
				break;
			case EP_SCMD:
			default:
				break;
			}
		}
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &urbtc_class);
	if (retval) {
		/* something prevented us from registering this driver */
		//err("Not able to get a minor for this device.");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	//info("USB Robot device now attached to urbtc - %d", interface->minor);	//chg t.miyo
//	dev_info(&interface->dev,"USB Robot device now attached to urbtc - %d", interface->minor);
	return 0;

error:
	if (dev) {
		if (dev->readreq_buffer)
			kfree(dev->readreq_buffer);
		if (dev->readbuf_urb)
			usb_free_urb(dev->readbuf_urb);
		if (dev->readbuf_work)
			kfree(dev->readbuf_work);
		if (dev->readbuf_buffered)
			kfree(dev->readbuf_buffered);
		kref_put(&dev->kref, urbtc_delete);
	}
	return retval;
}
/**
    Function Name : fifo_module_init
    Function Type : Module INIT
    Description   : Initialization method of the Kernel module. The
                    method gets invoked when the kernel module is being
                    inserted using the command insmod. On any failure,
                    every resource acquired so far (proc entry, char
                    device, class, device node) is released before
                    returning - the original leaked them.
*/
static int __init fifo_module_init(void)
{
	int ret;

	printk(KERN_INFO "FIFO:FIFO module is being loaded.\n");

	/** Proc FS is created with RD&WR permissions with name fifo_config */
	fifo_config_file_entry = proc_create(FIFO_CONFIG,0777,NULL,&fifo_config_module_fops);
	/** Condition to verify if fifo_config creation was successful */
	if(fifo_config_file_entry == NULL) {
		printk(KERN_ALERT "FIFO ERROR: Could not initialize /proc/%s\n",FIFO_CONFIG);
		/** FILE CREATION PROBLEM */
		return -ENOMEM;
	}

	/** Registering the Device with a major number as 240 and configuring
	    the file operations associated with it. */
	ret = register_chrdev(MAJOR_NUM, FIFO_DEVICE, &fifo_module_fops);
	if (ret < 0) {
		printk(KERN_ALERT "FIFO ERROR: %s failed with %d\n",
		       "Sorry, registering the character device ", MAJOR_NUM);
		/* FIX: previously returned here, leaking the proc entry */
		goto err_proc;
	}
	printk(KERN_INFO "FIFO:registered correctly with major number %d\n", MAJOR_NUM);

	/** Registering device class and associating devices with it. */
	fifoClass = class_create(THIS_MODULE, CLASS_NAME);
	if (IS_ERR(fifoClass)){
		printk(KERN_ALERT "FIFO ERROR:Failed to register device class\n");
		ret = PTR_ERR(fifoClass);
		/* FIX: also release the proc entry, not just the chrdev */
		goto err_chrdev;
	}
	printk(KERN_INFO "FIFO: device class registered correctly\n");

	/** Registering the device driver for the provided device class.
	    The driver is associated with fifo0, minor number 0. */
	fifo = device_create(fifoClass, NULL, MKDEV(MAJOR_NUM, MINOR_NUM_FIFO), NULL, FIFO_DEVICE_NAME);
	if (IS_ERR(fifo)){
		printk(KERN_ALERT "FIFO ERROR:Failed to create the device\n");
		ret = PTR_ERR(fifo);
		goto err_class;
	}
	printk(KERN_INFO "FIFO:device class created correctly\n");

	/** Device Status flag set to false because device not in use. */
	device_open = 0;
	/** Default memory size of the queue. */
	mem_alloc_size = fifo_size;
	/** Queue allocated with the default size. */
	queue = (struct data_item*)kmalloc(mem_alloc_size*sizeof(struct data_item),GFP_KERNEL);
	if(!queue) {
		printk(KERN_ERR "FIFO ERROR:Memory allocation problem.\n");
		ret = -ENOMEM;
		/* FIX: previously returned here, leaking device/class/chrdev/proc */
		goto err_device;
	}

	/** FIFO head/tail set to "empty" sentinel. */
	head = -1;
	tail = -1;
	/** Initializing push and pop counters */
	push = 0;
	pop = 0;
	/** Initializing the semaphores: 'empty' counts queued items (starts
	    at 0), 'full' counts free slots (starts at capacity). */
	sema_init(&mutex,1);
	sema_init(&empty,0);
	sema_init(&full,mem_alloc_size);
	sema_init(&producer_mutex,1);
	sema_init(&consumer_mutex,1);
	/** Initialize producer consumer counters */
	producer_ctr=0;
	consumer_ctr=0;
	num_items = 0;
	num_empty_slots = fifo_size;
	fill_percentage = 0;

	/** Successful execution of initialization method. */
	return 0;

err_device:
	device_destroy(fifoClass, MKDEV(MAJOR_NUM, MINOR_NUM_FIFO));
err_class:
	class_destroy(fifoClass);
err_chrdev:
	unregister_chrdev(MAJOR_NUM, FIFO_DEVICE);
err_proc:
	remove_proc_entry(FIFO_CONFIG, NULL);
	return ret;
}
osl_t * osl_attach(void *pdev, uint bustype, bool pkttag) { osl_t *osh; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) gfp_t flags; flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; osh = kmalloc(sizeof(osl_t), flags); #else osh = kmalloc(sizeof(osl_t), GFP_ATOMIC); #endif ASSERT(osh); bzero(osh, sizeof(osl_t)); ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); osh->magic = OS_HANDLE_MAGIC; atomic_set(&osh->malloced, 0); osh->failed = 0; osh->dbgmem_list = NULL; osh->pdev = pdev; osh->pub.pkttag = pkttag; osh->bustype = bustype; switch (bustype) { case PCI_BUS: case SI_BUS: case PCMCIA_BUS: osh->pub.mmbus = TRUE; break; case JTAG_BUS: case SDIO_BUS: case USB_BUS: case SPI_BUS: case RPC_BUS: osh->pub.mmbus = FALSE; break; default: ASSERT(FALSE); break; } #if defined(DHD_USE_STATIC_BUF) if (!bcm_static_buf) { if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+ STATIC_BUF_TOTAL_LEN))) { printk("can not alloc static buf!\n"); } else printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf); sema_init(&bcm_static_buf->static_sem, 1); bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; } if (!bcm_static_skb) { int i; void *skb_buff_ptr = 0; bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); skb_buff_ptr = dhd_os_prealloc(osh, 4, 0); bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * 16); for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++) bcm_static_skb->pkt_use[i] = 0; sema_init(&bcm_static_skb->osl_pkt_sem, 1); } #endif return osh; }
//---------------------------------------------------------------------------
//  EplLinInit - module initialization
//
//  Initializes global event/wait state, registers the character device
//  (via DEVFS or cdev, depending on configuration) and creates the
//  procfs entry.  Returns 0 on success or a negative errno.
//---------------------------------------------------------------------------
static int __init EplLinInit(void)
{
	tEplKernel EplRet;
	int iErr;
	int iRet;
#ifdef CONFIG_DEVFS_FS
	int nMinorNumber;
#endif

	TRACE0("EPL: + EplLinInit...\n");
	TRACE2("EPL: Driver build: %s / %s\n", __DATE__, __TIME__);
	iRet = 0;

	// initialize global variables
	atomic_set(&AtomicEventState_g, EVENT_STATE_INIT);
	sema_init(&SemaphoreCbEvent_g, 1);
	init_waitqueue_head(&WaitQueueCbEvent_g);
	init_waitqueue_head(&WaitQueueProcess_g);
	init_waitqueue_head(&WaitQueueRelease_g);

#ifdef CONFIG_DEVFS_FS
	// register character device handler
	TRACE2("EPL: Installing Driver '%s', Version %s...\n",
	       EPLLIN_DRV_NAME, EPL_PRODUCT_VERSION);
	TRACE0("EPL: (using dynamic major number assignment)\n");
	nDrvMajorNumber_g = register_chrdev(0, EPLLIN_DRV_NAME, &EplLinFileOps_g);
	// BUGFIX: register_chrdev(0, ...) returns the assigned major (> 0) on
	// success and a negative errno on failure; the original "!= 0" test
	// treated errors as success.
	if (nDrvMajorNumber_g > 0) {
		TRACE2
		    ("EPL: Driver '%s' installed successful, assigned MajorNumber=%d\n",
		     EPLLIN_DRV_NAME, nDrvMajorNumber_g);
	} else {
		TRACE1
		    ("EPL: ERROR: Driver '%s' is unable to get a free MajorNumber!\n",
		     EPLLIN_DRV_NAME);
		iRet = -EIO;
		goto Exit;
	}

	// create device node in DEVFS
	nMinorNumber = 0;
	TRACE1("EPL: Creating device node '/dev/%s'...\n", EPLLIN_DEV_NAME);
	iErr = devfs_mk_cdev(MKDEV(nDrvMajorNumber_g, nMinorNumber),
			     S_IFCHR | S_IRUGO | S_IWUGO, EPLLIN_DEV_NAME);
	if (iErr == 0) {
		TRACE1("EPL: Device node '/dev/%s' created successful.\n",
		       EPLLIN_DEV_NAME);
	} else {
		TRACE1("EPL: ERROR: unable to create device node '/dev/%s'\n",
		       EPLLIN_DEV_NAME);
		iRet = -EIO;
		goto Exit;
	}
#else
	// register character device handler; only one minor required
	TRACE2("EPL: Installing Driver '%s', Version %s...\n",
	       EPLLIN_DRV_NAME, EPL_PRODUCT_VERSION);
	iRet = alloc_chrdev_region(&nDevNum_g, 0, 1, EPLLIN_DRV_NAME);
	if (iRet == 0) {
		TRACE2
		    ("EPL: Driver '%s' installed successful, assigned MajorNumber=%d\n",
		     EPLLIN_DRV_NAME, MAJOR(nDevNum_g));
	} else {
		TRACE1
		    ("EPL: ERROR: Driver '%s' is unable to get a free MajorNumber!\n",
		     EPLLIN_DRV_NAME);
		iRet = -EIO;
		goto Exit;
	}

	// register cdev structure
	pEpl_cdev_g = cdev_alloc();
	// BUGFIX: cdev_alloc() can return NULL; the original dereferenced the
	// result unconditionally.
	if (pEpl_cdev_g == NULL) {
		unregister_chrdev_region(nDevNum_g, 1);
		iRet = -ENOMEM;
		goto Exit;
	}
	pEpl_cdev_g->ops = &EplLinFileOps_g;
	pEpl_cdev_g->owner = THIS_MODULE;
	iErr = cdev_add(pEpl_cdev_g, nDevNum_g, 1);
	if (iErr) {
		TRACE2("EPL: ERROR %d: Driver '%s' could not be added!\n",
		       iErr, EPLLIN_DRV_NAME);
		// BUGFIX: release the cdev object and the reserved region;
		// the original leaked both on this path.
		kobject_put(&pEpl_cdev_g->kobj);
		unregister_chrdev_region(nDevNum_g, 1);
		iRet = -EIO;
		goto Exit;
	}
#endif

	// create device node in PROCFS
	EplRet = EplLinProcInit();
	if (EplRet != kEplSuccessful) {
		// BUGFIX: the original jumped to Exit with iRet still 0,
		// reporting success although proc init failed.
		iRet = -EIO;
		goto Exit;
	}

Exit:
	TRACE1("EPL: - EplLinInit (iRet=%d)\n", iRet);
	return (iRet);
}
static int frandom_init_module(void) { int result; /* The buffer size MUST be at least 256 bytes, because we assume that minimal length in init_rand_state(). */ if (frandom_bufsize < 256) { printk(KERN_ERR "frandom: Refused to load because frandom_bufsize=%d < 256\n",frandom_bufsize); return -EINVAL; } if ((frandom_chunklimit != 0) && (frandom_chunklimit < 256)) { printk(KERN_ERR "frandom: Refused to load because frandom_chunklimit=%d < 256 and != 0\n",frandom_chunklimit); return -EINVAL; } erandom_state = kmalloc(sizeof(struct frandom_state), GFP_KERNEL); if (!erandom_state) return -ENOMEM; /* This specific buffer is only used for seeding, so we need 256 bytes exactly */ erandom_state->buf = kmalloc(256, GFP_KERNEL); if (!erandom_state->buf) { kfree(erandom_state); return -ENOMEM; } sema_init(&erandom_state->sem, 1); /* Init semaphore as a mutex */ erandom_seeded = 0; frandom_class = class_create(THIS_MODULE, "fastrng"); if (IS_ERR(frandom_class)) { result = PTR_ERR(frandom_class); printk(KERN_WARNING "frandom: Failed to register class fastrng\n"); goto error0; } /* * Register your major, and accept a dynamic number. 
This is the * first thing to do, in order to avoid releasing other module's * fops in frandom_cleanup_module() */ cdev_init(&frandom_cdev, &frandom_fops); frandom_cdev.owner = THIS_MODULE; result = cdev_add(&frandom_cdev, MKDEV(frandom_major, frandom_minor), 1); if (result) { printk(KERN_WARNING "frandom: Failed to add cdev for /dev/frandom\n"); goto error1; } result = register_chrdev_region(MKDEV(frandom_major, frandom_minor), 1, "/dev/frandom"); if (result < 0) { printk(KERN_WARNING "frandom: can't get major/minor %d/%d\n", frandom_major, frandom_minor); goto error2; } frandom_device = device_create(frandom_class, NULL, MKDEV(frandom_major, frandom_minor), NULL, "frandom"); if (IS_ERR(frandom_device)) { printk(KERN_WARNING "frandom: Failed to create frandom device\n"); goto error3; } cdev_init(&erandom_cdev, &frandom_fops); erandom_cdev.owner = THIS_MODULE; result = cdev_add(&erandom_cdev, MKDEV(frandom_major, erandom_minor), 1); if (result) { printk(KERN_WARNING "frandom: Failed to add cdev for /dev/erandom\n"); goto error4; } result = register_chrdev_region(MKDEV(frandom_major, erandom_minor), 1, "/dev/erandom"); if (result < 0) { printk(KERN_WARNING "frandom: can't get major/minor %d/%d\n", frandom_major, erandom_minor); goto error5; } erandom_device = device_create(frandom_class, NULL, MKDEV(frandom_major, erandom_minor), NULL, "erandom"); if (IS_ERR(erandom_device)) { printk(KERN_WARNING "frandom: Failed to create erandom device\n"); goto error6; } return 0; /* succeed */ error6: unregister_chrdev_region(MKDEV(frandom_major, erandom_minor), 1); error5: cdev_del(&erandom_cdev); error4: device_destroy(frandom_class, MKDEV(frandom_major, frandom_minor)); error3: unregister_chrdev_region(MKDEV(frandom_major, frandom_minor), 1); error2: cdev_del(&frandom_cdev); error1: class_destroy(frandom_class); error0: kfree(erandom_state->buf); kfree(erandom_state); return result; }
/*
 * Allocate and initialize the net_device (plus embedded islpci_private)
 * for one Prism54 PCI adapter.
 *
 * On success the device is registered with the network stack and returned.
 * On failure all partially-acquired resources are released and NULL is
 * returned (or NULL directly if alloc_etherdev() fails).
 */
struct net_device *
islpci_setup(struct pci_dev *pdev)
{
	islpci_private *priv;
	struct net_device *ndev = alloc_etherdev(sizeof (islpci_private));

	if (!ndev)
		return ndev;

	SET_MODULE_OWNER(ndev);
	/* driver-data points at the net_device so the PCI layer can find us */
	pci_set_drvdata(pdev, ndev);
#if defined(SET_NETDEV_DEV)
	SET_NETDEV_DEV(ndev, &pdev->dev);
#endif

	/* setup the structure members */
	ndev->base_addr = pci_resource_start(pdev, 0);
	ndev->irq = pdev->irq;

	/* initialize the function pointers (pre-net_device_ops kernel API) */
	ndev->open = &islpci_open;
	ndev->stop = &islpci_close;
	ndev->get_stats = &islpci_statistics;
	ndev->do_ioctl = &prism54_ioctl;
	ndev->wireless_handlers = (struct iw_handler_def *) &prism54_handler_def;
	ndev->ethtool_ops = &islpci_ethtool_ops;
	ndev->hard_start_xmit = &islpci_eth_transmit;
	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
	ndev->addr_len = ETH_ALEN;
	ndev->set_mac_address = &prism54_set_mac_address;
	/* Get a non-zero dummy MAC address for nameif. Jean II */
	memcpy(ndev->dev_addr, dummy_mac, 6);

#ifdef HAVE_TX_TIMEOUT
	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
	ndev->tx_timeout = &islpci_eth_tx_timeout;
#endif

	/* allocate a private device structure to the network device */
	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->monitor_type = ARPHRD_IEEE80211;
	/* ARP hardware type depends on whether we start in monitor mode */
	priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
	    priv->monitor_type : ARPHRD_ETHER;

	/* Add pointers to enable iwspy support. */
	priv->wireless_data.spy_data = &priv->spy_data;
	ndev->wireless_data = &priv->wireless_data;

	/* save the start and end address of the PCI memory area
	 * NOTE(review): priv->device_base is read here but not visibly set in
	 * this function — presumably mapped by the caller; confirm ordering. */
	ndev->mem_start = (unsigned long) priv->device_base;
	ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base);
#endif

	init_waitqueue_head(&priv->reset_done);

	/* init the queue read locks, process wait counter */
	sema_init(&priv->mgmt_sem, 1);
	priv->mgmt_received = NULL;
	init_waitqueue_head(&priv->mgmt_wqueue);
	sema_init(&priv->stats_sem, 1);
	spin_lock_init(&priv->slock);

	/* init state machine with off#1 state */
	priv->state = PRV_STATE_OFF;
	priv->state_off = 1;

	/* initialize workqueue's */
	INIT_WORK(&priv->stats_work, prism54_update_stats);
	priv->stats_timestamp = 0;

	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
	priv->reset_task_pending = 0;

	/* allocate various memory areas */
	if (islpci_alloc_memory(priv))
		goto do_free_netdev;

	/* select the firmware file depending on the device id */
	switch (pdev->device) {
	case 0x3877:
		strcpy(priv->firmware, ISL3877_IMAGE_FILE);
		break;
	case 0x3886:
		strcpy(priv->firmware, ISL3886_IMAGE_FILE);
		break;
	default:
		strcpy(priv->firmware, ISL3890_IMAGE_FILE);
		break;
	}

	if (register_netdev(ndev)) {
		DEBUG(SHOW_ERROR_MESSAGES, "ERROR: register_netdev() failed \n");
		goto do_islpci_free_memory;
	}

	return ndev;

	/* error unwind: reverse order of acquisition */
      do_islpci_free_memory:
	islpci_free_memory(priv);
      do_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;
	return NULL;
}
/*
 * Allocate and initialize the common driver state shared by all CW1200
 * interfaces: the mac80211 hw struct, wiphy capabilities, locks, work
 * items, TX queues and WSM callback table.
 *
 * @priv_data_len: size of the bus-specific private area appended to
 *                 struct cw1200_common by ieee80211_alloc_hw().
 *
 * Returns the ieee80211_hw on success, NULL on allocation failure
 * (the hw struct is freed before returning NULL).
 */
static struct ieee80211_hw *cw1200_init_common(size_t priv_data_len)
{
	int i;
	struct ieee80211_hw *hw;
	struct cw1200_common *priv;
	struct ieee80211_supported_band *sband;
	int band;

	hw = ieee80211_alloc_hw(priv_data_len, &cw1200_ops);
	if (!hw)
		return NULL;

	priv = hw->priv;
	priv->hw = hw;
	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
	priv->rates = cw1200_rates; /* TODO: fetch from FW */
	priv->mcs_rates = cw1200_n_rates;
	/* Enable block ACK for every TID but voice. */
	priv->ba_tid_mask = 0x3F;

	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SUPPORTS_PS |
		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		    IEEE80211_HW_SUPPORTS_UAPSD |
		    IEEE80211_HW_CONNECTION_MONITOR |
		    IEEE80211_HW_SUPPORTS_CQM_RSSI |
		    /* Aggregation is fully controlled by firmware.
		     * Do not need any support from the mac80211 stack */
		    /* IEEE80211_HW_AMPDU_AGGREGATION | */
#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
		    IEEE80211_HW_SUPPORTS_P2P_PS |
		    IEEE80211_HW_SUPPORTS_CQM_BEACON_MISS |
		    IEEE80211_HW_SUPPORTS_CQM_TX_FAIL |
#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
		    IEEE80211_HW_BEACON_FILTER;

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_ADHOC) |
				     BIT(NL80211_IFTYPE_AP) |
				     BIT(NL80211_IFTYPE_MESH_POINT) |
				     BIT(NL80211_IFTYPE_P2P_CLIENT) |
				     BIT(NL80211_IFTYPE_P2P_GO);

	/* Support only for limited wowlan functionalities */
	hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT;
	hw->wiphy->wowlan.n_patterns = 0;

#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */

#if defined(CONFIG_CW1200_DISABLE_BEACON_HINTS) || defined(CONFIG_CW1200_DISABLE_REGULATORY_HINT_11D)
	hw->wiphy->flags |= WIPHY_FLAG_DISABLE_BEACON_HINTS;
#endif

	hw->channel_change_time = 1000; /* TODO: find actual value */
	/* priv->beacon_req_id = cpu_to_le32(0); */
	hw->queues = 4; /* one hw queue per AC */
	priv->noise = -94;
	priv->rts_threshold = -1;

	hw->max_rates = 8;
	hw->max_rate_tries = 15;
	/* reserve room for the WSM header plus worst-case TKIP overhead */
	hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM +
		8 /* TKIP IV */ +
		12 /* TKIP ICV and MIC */;

	hw->sta_data_size = sizeof(struct cw1200_sta_priv);

	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
#ifdef CONFIG_CW1200_5GHZ_SUPPORT
	hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
#endif /* CONFIG_CW1200_5GHZ_SUPPORT */

	/* Channel params have to be cleared before registering wiphy again */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = hw->wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++) {
			sband->channels[i].flags = 0;
			sband->channels[i].max_antenna_gain = 0;
			sband->channels[i].max_power = 30;
		}
	}

	hw->wiphy->max_scan_ssids = 2;
	hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;

	/* Start from the template MAC; randomize the low 3 bytes if they
	 * are all zero (i.e. no per-device address was provisioned). */
	SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template);
	if (hw->wiphy->perm_addr[3] == 0 &&
	    hw->wiphy->perm_addr[4] == 0 &&
	    hw->wiphy->perm_addr[5] == 0) {
		get_random_bytes(&hw->wiphy->perm_addr[3], 3);
	}

	mutex_init(&priv->wsm_cmd_mux);
	mutex_init(&priv->conf_mutex);
	/* NOTE(review): create_singlethread_workqueue() can return NULL and
	 * the result is not checked here — TODO confirm/handle upstream. */
	priv->workqueue = create_singlethread_workqueue("cw1200_wq");
	sema_init(&priv->scan.lock, 1);
	INIT_WORK(&priv->scan.work, cw1200_scan_work);
	INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
	INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout);
	INIT_DELAYED_WORK(&priv->clear_recent_scan_work,
			  cw1200_clear_recent_scan_work);
	INIT_WORK(&priv->join_work, cw1200_join_work);
	INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout);
	INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work);
	INIT_WORK(&priv->offchannel_work, cw1200_offchannel_work);
	INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work);
	INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work);
	INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work);
	spin_lock_init(&priv->event_queue_lock);
	INIT_LIST_HEAD(&priv->event_queue);
	INIT_WORK(&priv->event_handler, cw1200_event_handler);
	INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work);
	INIT_DELAYED_WORK(&priv->connection_loss_work,
			  cw1200_connection_loss_work);
	spin_lock_init(&priv->bss_loss_lock);
	INIT_WORK(&priv->tx_failure_work, cw1200_tx_failure_work);
	spin_lock_init(&priv->ps_state_lock);
	INIT_DELAYED_WORK(&priv->set_cts_work, cw1200_set_cts_work);
	INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work);
	INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work);
	INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work);
	INIT_WORK(&priv->link_id_work, cw1200_link_id_work);
	INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work);
#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
	INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset);
#endif
	INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
	INIT_WORK(&priv->set_beacon_wakeup_period_work,
		  cw1200_set_beacon_wakeup_period_work);
	init_timer(&priv->mcast_timeout);
	priv->mcast_timeout.data = (unsigned long)priv;
	priv->mcast_timeout.function = cw1200_mcast_timeout;

	if (unlikely(cw1200_queue_stats_init(&priv->tx_queue_stats,
					     CW1200_LINK_ID_MAX,
					     cw1200_skb_dtor, priv))) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	for (i = 0; i < 4; ++i) {
		if (unlikely(cw1200_queue_init(&priv->tx_queue[i],
					       &priv->tx_queue_stats, i, 16,
					       cw1200_ttl[i]))) {
			/* tear down the queues initialized so far */
			for (; i > 0; i--)
				cw1200_queue_deinit(&priv->tx_queue[i - 1]);
			cw1200_queue_stats_deinit(&priv->tx_queue_stats);
			ieee80211_free_hw(hw);
			return NULL;
		}
	}

	init_waitqueue_head(&priv->channel_switch_done);
	init_waitqueue_head(&priv->wsm_cmd_wq);
	init_waitqueue_head(&priv->wsm_startup_done);
	init_waitqueue_head(&priv->ps_mode_switch_done);
	wsm_buf_init(&priv->wsm_cmd_buf);
	spin_lock_init(&priv->wsm_cmd.lock);
	tx_policy_init(priv);
#if defined(CONFIG_CW1200_WSM_DUMPS_SHORT)
	priv->wsm_dump_max_size = 20;
#endif /* CONFIG_CW1200_WSM_DUMPS_SHORT */

	/* WSM callbacks. */
	priv->wsm_cbc.scan_complete = cw1200_scan_complete_cb;
	priv->wsm_cbc.join_complete = cw1200_join_complete_cb;
	priv->wsm_cbc.tx_confirm = cw1200_tx_confirm_cb;
	priv->wsm_cbc.rx = cw1200_rx_cb;
	priv->wsm_cbc.suspend_resume = cw1200_suspend_resume;
	/* priv->wsm_cbc.set_pm_complete = cw1200_set_pm_complete_cb; */
	priv->wsm_cbc.channel_switch = cw1200_channel_switch_cb;
#ifdef CONFIG_CW1200_BINARY_LOGGING
	/* We want to see it in IDD reports by default */
	priv->wsm_enable_wsm_dumps = 1;
#endif /* CONFIG_CW1200_BINARY_LOGGING */
	return hw;
}
/*
 * create a new virtual interface with the given name
 *
 * Validates the requested nl80211 interface type, binds the matching
 * mwifiex_private slot, allocates and registers a net_device for it.
 * Returns the net_device on success or an ERR_PTR() on failure; on
 * failure all allocations made here are released and priv->bss_mode is
 * reset so the slot can be reused.
 */
struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
					    char *name,
					    enum nl80211_iftype type,
					    u32 *flags,
					    struct vif_params *params)
{
	struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
	struct mwifiex_private *priv;
	struct net_device *dev;
	void *mdev_priv;
	struct wireless_dev *wdev;

	if (!adapter)
		return ERR_PTR(-EFAULT);

	switch (type) {
	case NL80211_IFTYPE_UNSPECIFIED:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
		priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
		if (priv->bss_mode) {
			wiphy_err(wiphy,
				  "cannot create multiple sta/adhoc ifaces\n");
			return ERR_PTR(-EINVAL);
		}

		wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
		if (!wdev)
			return ERR_PTR(-ENOMEM);

		wdev->wiphy = wiphy;
		priv->wdev = wdev;
		wdev->iftype = NL80211_IFTYPE_STATION;

		if (type == NL80211_IFTYPE_UNSPECIFIED)
			priv->bss_mode = NL80211_IFTYPE_STATION;
		else
			priv->bss_mode = type;

		priv->bss_type = MWIFIEX_BSS_TYPE_STA;
		priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
		priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
		priv->bss_role = MWIFIEX_BSS_ROLE_STA;
		priv->bss_num = 0;

		break;
	case NL80211_IFTYPE_AP:
		priv = adapter->priv[MWIFIEX_BSS_TYPE_UAP];

		if (priv->bss_mode) {
			wiphy_err(wiphy, "Can't create multiple AP interfaces");
			return ERR_PTR(-EINVAL);
		}

		wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
		if (!wdev)
			return ERR_PTR(-ENOMEM);

		priv->wdev = wdev;
		wdev->wiphy = wiphy;
		wdev->iftype = NL80211_IFTYPE_AP;

		priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
		priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
		priv->bss_priority = MWIFIEX_BSS_ROLE_UAP;
		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
		priv->bss_started = 0;
		priv->bss_num = 0;
		priv->bss_mode = type;

		break;
	default:
		wiphy_err(wiphy, "type not supported\n");
		return ERR_PTR(-EINVAL);
	}

	dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
			      ether_setup, 1);
	if (!dev) {
		wiphy_err(wiphy, "no memory available for netdevice\n");
		/* BUGFIX: the original leaked the kzalloc'ed wireless_dev
		 * and left a dangling priv->wdev on this path. */
		kfree(wdev);
		priv->wdev = NULL;
		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
		return ERR_PTR(-ENOMEM);
	}

	mwifiex_init_priv_params(priv, dev);
	priv->netdev = dev;

	mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
	if (adapter->config_bands & BAND_A)
		mwifiex_setup_ht_caps(
			&wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);

	dev_net_set(dev, wiphy_net(wiphy));
	dev->ieee80211_ptr = priv->wdev;
	dev->ieee80211_ptr->iftype = priv->bss_mode;
	memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
	memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(dev, wiphy_dev(wiphy));

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
	dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;

	/* stash the mwifiex_private pointer in the netdev private area */
	mdev_priv = netdev_priv(dev);
	*((unsigned long *) mdev_priv) = (unsigned long) priv;

	SET_NETDEV_DEV(dev, adapter->dev);

	/* Register network device */
	if (register_netdevice(dev)) {
		wiphy_err(wiphy, "cannot register virtual network device\n");
		free_netdev(dev);
		/* BUGFIX: also release the wireless_dev and clear the stale
		 * priv pointers so the slot is fully reusable. */
		kfree(wdev);
		priv->wdev = NULL;
		priv->netdev = NULL;
		priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
		return ERR_PTR(-EFAULT);
	}

	sema_init(&priv->async_sem, 1);
	priv->scan_pending_on_block = false;

	dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);

#ifdef CONFIG_DEBUG_FS
	mwifiex_dev_debugfs_init(priv);
#endif
	return dev;
}
static int retina_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_retina *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; size_t buffer_size; int i; int retval = -ENOMEM; struct urb *urb; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { dev_err(&interface->dev,"Out of memory"); goto error; } kref_init(&dev->kref); sema_init(&dev->limit_sem, WRITES_IN_FLIGHT); mutex_init(&dev->io_mutex); spin_lock_init(&dev->err_lock); init_usb_anchor(&dev->submitted); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* set up the endpoint information */ /* use only the first bulk-in endpoint */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; /*dev_info(&interface->dev,"usb endpoint[%d] found. size=%d, addr=0x%x",i,endpoint->wMaxPacketSize,endpoint->bEndpointAddress);*/ if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint) && (endpoint->wMaxPacketSize==512)) { if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) { /* we found a bulk in endpoint */ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); dev->bulk_in_size = buffer_size; dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!dev->bulk_in_buffer) { dev_err(&interface->dev,"Could not allocate bulk_in_buffer"); goto error; } } } } if (!(dev->bulk_in_endpointAddr)) { dev_err(&interface->dev,"Could not find bulk-in endpoint\n"); goto error; } dev->event_counter = 0; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* we can register the device now, as it is ready */ retval = usb_register_dev(interface, &retina_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev,"Not able to get a minor for 
this device."); usb_set_intfdata(interface, NULL); goto error; } /* let the user know what node this device is now attached to */ dev_info(&interface->dev,"retina now attached to /dev/retina0\n"); vendorRequest(dev, VENDOR_REQUEST_START_TRANSFER, 0, 0, NULL, 0); /*dev_info(&dev->interface->dev,"VENDOR_REQUEST_START_TRANSFER returned %d",retval);*/ urb = usb_alloc_urb (0, GFP_KERNEL); if (!urb) goto error; usb_fill_bulk_urb (urb, dev->udev, usb_rcvbulkpipe (dev->udev, dev->bulk_in_endpointAddr), dev->bulk_in_buffer, dev->bulk_in_size, address_event_callback, dev); dev->urb = urb; retval = usb_submit_urb (dev->urb, GFP_KERNEL); if (retval) { dev_err(&interface->dev, "error getting ae data: %d",retval); goto error; } return 0; error: if (dev) /* this frees allocated memory */ kref_put(&dev->kref, retina_delete); return retval; }
static int simple_init(void) { int result = 0; static dev_t dev_no; printk(KERN_INFO "%s()\n", __func__); result = kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL); if (result) { printk(KERN_ERR "error kfifo_alloc\n"); goto kfifo_alloc_err; } printk(KERN_INFO "%s() : kfifo size = %lu\n", __func__, FIFO_SIZE); result = alloc_chrdev_region(&dev_no, 0, 2, "simple"); if (result < 0) { printk(KERN_WARNING "simple: unable to get major %d\n", simple_major); result = -EPERM; goto alloc_chedev_region_fail; } simple_major = MAJOR(dev_no); simple_minor = MINOR(dev_no); printk(KERN_INFO "%s() : Simple driver for 2 devices\n", __func__); printk(KERN_INFO "Major=%d,Minor=%d\n", simple_major, simple_minor); printk(KERN_INFO "MajoR=%d,Minor=%d\n", simple_major, (simple_minor+1)); cl = class_create(THIS_MODULE, "chardrv"); if (cl == NULL) { result = -EPERM; goto class_create_fail; } /* Now set up two cdevs. */ result = simple_setup_cdev(simple_cdev, 0, &prod_ops, "prod"); if (result < 0) { if (result == -DEVADDERR) goto dev_add_err_0; else goto dev_create_err_0; } result = simple_setup_cdev(simple_cdev + 1, 1, &cons_ops, "cons"); if (result < 0) { if (result == -DEVADDERR) goto dev_add_err_1; else goto dev_create_err_1; } sema_init(&prod_sem, 1); sema_init(&cons_sem, 1); init_waitqueue_head(&prod_que); init_waitqueue_head(&cons_que); goto init_success; dev_add_err_1: cdev_del(simple_cdev + 1); dev_create_err_1: dev_add_err_0: cdev_del(simple_cdev); dev_create_err_0: class_create_fail: unregister_chrdev_region(MKDEV(simple_major, simple_minor), 2); alloc_chedev_region_fail: kfifo_alloc_err: result = -1; init_success: return result; }
static int s5pv210_key_init(void) { int ret; int i; char buf[64]; //1.分配设备号 //int alloc_chrdev_region(dev_t *dev, unsigned baseminor,unsigned count,char *name) ret = alloc_chrdev_region(&devno, baseminor, count, name); if(ret < 0) { printk("alloc_chrdev_region error\n"); } //2.分配字符设备结构体 cdev = cdev_alloc(); if(cdev == NULL) { printk("cdev_alloc error\n"); goto out; } //3.初始化字符设备结构体 //void cdev_init(struct cdev *cdev, const struct file_operations *fops) cdev_init(cdev, &fops); //4.添加到系统 //int cdev_add(struct cdev *p, dev_t dev, unsigned count) ret = cdev_add(cdev, devno, count); if(ret < 0) { printk("cdev_add error\n"); goto out1; } //5.创建设备文件 cls = class_create(THIS_MODULE, name); //执行成功,会创建 /sys/class/name if(IS_ERR(cls)) { printk("class_create error\n"); goto out2; } //创建两个设备文件 key0 led1 for(i = 0; i < count; i++) { sprintf(buf, "%s%d", name, i); //格式化字符串 device_create(cls, NULL, MKDEV(MAJOR(devno), i), NULL, buf); } //初始化信号量 sema_init(&sem, 1); return 0; out2: //void cdev_del(struct cdev *p) cdev_del(cdev); out1: kobject_put(&cdev->kobj); //把cdev对象的引用计数 -1 out: //void unregister_chrdev_region(dev_t from, unsigned count) unregister_chrdev_region(devno, count); return -1; }
/*
 * Allocate the DMA fragment buffers for an audio stream.
 *
 * Allocates s->nbfrags descriptors, then carves them out of as few
 * contiguous DMA regions as possible: ideally one big consistent_alloc()
 * covers all remaining fragments; on fragmentation the request shrinks
 * one fragsize at a time and the loop allocates additional regions.
 *
 * Returns 0 on success, -EBUSY if buffers already exist, -ENOMEM on
 * allocation failure (partially built state is torn down via
 * audio_discard_buf()).
 */
static int audio_setup_buf(audio_stream_t * s)
{
	int frag;
	int dmasize = 0;		/* bytes left in the current DMA region */
	char *dmabuf = NULL;		/* cursor into the current DMA region */
	dma_addr_t dmaphys = 0;		/* bus address matching dmabuf */

	if (s->buffers)
		return -EBUSY;

	s->buffers = kmalloc(sizeof(audio_buf_t) * s->nbfrags, GFP_KERNEL);
	if (!s->buffers)
		goto err;
	memset(s->buffers, 0, sizeof(audio_buf_t) * s->nbfrags);

	for (frag = 0; frag < s->nbfrags; frag++) {
		audio_buf_t *b = &s->buffers[frag];
		/*
		 * Let's allocate non-cached memory for DMA buffers.
		 * We try to allocate all memory at once.
		 * If this fails (a common reason is memory fragmentation),
		 * then we allocate more smaller buffers.
		 */
		if (!dmasize) {
			dmasize = (s->nbfrags - frag) * s->fragsize;
			do {
				dmabuf = consistent_alloc(GFP_KERNEL|GFP_DMA, dmasize, &dmaphys);
				if (!dmabuf)
					dmasize -= s->fragsize;
			} while (!dmabuf && dmasize);
			if (!dmabuf)
				goto err;
			/* only the fragment that owns the region records its
			 * total size, so teardown can free the whole region */
			b->master = dmasize;
			memzero(dmabuf, dmasize);
		}
		b->data = dmabuf;
		b->dma_addr = dmaphys;
		DPRINTK("buf %d: start %p dma %#08x master %d fragsize %d\n",
			frag, b->data, b->dma_addr, b->master, s->fragsize);
		/* advance the cursor; when dmasize hits 0 a new region is
		 * allocated on the next iteration */
		dmabuf += s->fragsize;
		dmaphys += s->fragsize;
		dmasize -= s->fragsize;
	}

	/* reset ring-buffer bookkeeping */
	s->usr_head = s->dma_head = s->dma_tail = 0;
	s->bytecount = 0;
	s->fragcount = 0;
	/* counting semaphore: one slot per free fragment */
	sema_init(&s->sem, s->nbfrags);

	return 0;

err:
	printk(AUDIO_NAME ": unable to allocate audio memory\n ");
	audio_discard_buf(s);
	return -ENOMEM;
}
/*
 * Module init: announce the module name and initialize my_sema as a
 * counting semaphore permitting two concurrent holders.
 */
static int __init my_semaphore_init(void)
{
	/* FIX: give the printk an explicit log level instead of relying on
	 * the implicit KERN_DEFAULT. */
	printk(KERN_INFO "%s\n", __stringify(KBUILD_MODNAME));
	sema_init(&my_sema, 2);	/* count = 2: up to two holders at once */
	return 0;
}
/*
 * Module init for the PO188 light sensor: register the misc device,
 * create the worker queue and polling timer, hook up the (shared) input
 * device, early-suspend callbacks and the sysfs group.
 * Returns 0 on success or a negative errno, undoing prior steps.
 */
static int __init po188_init(void)
{
	int err;
	int ret;
	struct kobject *kobj = NULL;

	po188_driver.dev.name = PO188_DEV_NAME;
	po188_driver.dev.minor = MISC_DYNAMIC_MINOR;
	po188_driver.fops.open = po188_open;
	po188_driver.fops.release = po188_release;
	/* Begin: leyihua modified for Linux Kernel 3.0, begin 2011/11/26 */
	po188_driver.fops.unlocked_ioctl = po188_ioctl;
	/* End: leyihua modified for Linux Kernel 3.0, end 2011/11/26 */
	po188_driver.dev.fops = &po188_driver.fops;
	po188_driver.last_voltage = 0;
	po188_driver.current_voltage = 0;
	po188_driver.vol_flag = false;

	err = misc_register(&po188_driver.dev);
	if (err) {
		PO188_ERRMSG("misc_register() failed");
		/* BUGFIX: the original jumped to a cleanup label that called
		 * sysfs_remove_group() on a NULL kobject. */
		return err;
	}

	sema_init(&(po188_driver.run_sem), 1);

	po188_driver.po188_wq = create_singlethread_workqueue("po188_wq");
	if (!po188_driver.po188_wq) {
		printk(KERN_ERR "***********************%s: create workque failed \n", __func__);
		/* BUGFIX: deregister the misc device before bailing out */
		err = -ENOMEM;
		goto err_deregister;
	}

	/* periodic polling timer that kicks the worker thread */
	init_timer(&po188_driver.timer);
	po188_driver.timer.expires = jiffies + SENSOR_POLLING_JIFFIES;
	po188_driver.timer.data = 0;
	po188_driver.timer.function = po188_start_cb_thread;
	add_timer(&po188_driver.timer);

	/* G-sensor & Light share input dev. suchangyu. 20100513. begin. */
	if (sensor_dev == NULL) {
		po188_driver.input_dev = input_allocate_device();
		if (po188_driver.input_dev == NULL) {
			printk(KERN_ERR "po188_init : Failed to allocate input device\n");
			err = -ENOMEM;
			goto err_timer;
		}
		po188_driver.input_dev->name = "sensors"; // "light_sensor"
		input_set_drvdata(po188_driver.input_dev, &po188_driver);
		ret = input_register_device(po188_driver.input_dev);
		if (ret) {
			printk(KERN_ERR "[%s]Unable to register %s input device\n", __func__,po188_driver.input_dev->name);
			input_free_device(po188_driver.input_dev);
			err = ret;
			goto err_timer;
		}
		sensor_dev = po188_driver.input_dev;
	} else {
		po188_driver.input_dev = sensor_dev;
	}
	/* G-sensor & Light share input dev. suchangyu. 20100513. end. */

	set_bit(EV_ABS, po188_driver.input_dev->evbit);
	set_bit(ABS_MISC, po188_driver.input_dev->absbit);
	set_bit(EV_SYN, po188_driver.input_dev->evbit);

	po188_driver.early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
	po188_driver.early_suspend.suspend = Po188_suspend;
	po188_driver.early_suspend.resume = Po188_resume;
	register_early_suspend(&po188_driver.early_suspend);

	kobj = kobject_create_and_add("po188", NULL);
	if (kobj == NULL) {
		err = -ENOMEM;
		goto err_suspend;
	}
	if (sysfs_create_group(kobj, &po188_defattr_group)) {
		kobject_put(kobj);
		err = -ENOMEM;
		goto err_suspend;
	}
	return 0;

	/* BUGFIX: the original returned -1 from the late failure points
	 * without undoing anything (timer kept firing into freed state). */
err_suspend:
	unregister_early_suspend(&po188_driver.early_suspend);
err_timer:
	del_timer_sync(&po188_driver.timer);
	destroy_workqueue(po188_driver.po188_wq);
err_deregister:
	misc_deregister(&po188_driver.dev);
	return err;
}
/*
 * This routine actually sets the video mode. It's in here where we
 * the hardware state info->par and fix which can be affected by the
 * change in par. For this driver it doesn't do much.
 *
 * Sequence: tear down the currently running panel (with the pixel clock
 * enabled), resize the framebuffer memory if needed, then — unless the
 * framebuffer is blanked — reprogram and restart the panel with the new
 * timings.  Returns 0 on success or -ENOMEM if remapping video memory
 * fails.
 */
static int mxc_elcdif_fb_set_par(struct fb_info *fbi)
{
	struct mxc_elcdif_fb_data *data = (struct mxc_elcdif_fb_data *)fbi->par;
	struct elcdif_signal_cfg sig_cfg;
	int mem_len;

	dev_dbg(fbi->device, "Reconfiguring framebuffer\n");

	/* If parameter no change, don't reconfigure. */
	if (mxc_elcdif_fb_par_equal(fbi, data))
		return 0;

	sema_init(&data->flip_sem, 1);

	/* release prev panel — the pixel clock must be running while the
	 * controller is stopped and the DMA released */
	if (!g_elcdif_pix_clk_enable) {
		clk_enable(g_elcdif_pix_clk);
		g_elcdif_pix_clk_enable = true;
	}
	mxc_elcdif_blank_panel(FB_BLANK_POWERDOWN);
	mxc_elcdif_stop();
	release_dotclk_panel();
	mxc_elcdif_dma_release();
	mxc_elcdif_fb_set_fix(fbi);
	if (g_elcdif_pix_clk_enable) {
		clk_disable(g_elcdif_pix_clk);
		g_elcdif_pix_clk_enable = false;
	}

	/* (re)allocate video memory if the new mode needs more than mapped */
	mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
	if (!fbi->fix.smem_start || (mem_len > fbi->fix.smem_len)) {
		if (fbi->fix.smem_start)
			mxc_elcdif_fb_unmap_video_memory(fbi);
		if (mxc_elcdif_fb_map_video_memory(fbi) < 0)
			return -ENOMEM;
	}

	/* blanked: leave the panel off, reconfigure on next unblank */
	if (data->next_blank != FB_BLANK_UNBLANK)
		return 0;

	/* init next panel */
	if (!g_elcdif_pix_clk_enable) {
		clk_enable(g_elcdif_pix_clk);
		g_elcdif_pix_clk_enable = true;
	}
	mxc_init_elcdif();
	mxc_elcdif_init_panel();

	dev_dbg(fbi->device, "pixclock = %u Hz\n",
		(u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));

	/* translate the fb sync flags into panel signal polarities */
	memset(&sig_cfg, 0, sizeof(sig_cfg));
	if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
		sig_cfg.Hsync_pol = true;
	if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
		sig_cfg.Vsync_pol = true;
	if (fbi->var.sync & FB_SYNC_CLK_LAT_FALL)
		sig_cfg.clk_pol = true;
	if (!(fbi->var.sync & FB_SYNC_OE_LOW_ACT))
		sig_cfg.enable_pol = true;

	setup_dotclk_panel((PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
			   fbi->var.vsync_len,
			   fbi->var.upper_margin +
			   fbi->var.yres + fbi->var.lower_margin,
			   fbi->var.upper_margin,
			   fbi->var.yres,
			   fbi->var.hsync_len,
			   fbi->var.left_margin +
			   fbi->var.xres + fbi->var.right_margin,
			   fbi->var.left_margin,
			   fbi->var.xres,
			   bpp_to_pixfmt(fbi),
			   data->output_pix_fmt,
			   sig_cfg,
			   1);

	mxc_elcdif_frame_addr_setup(fbi->fix.smem_start);
	mxc_elcdif_run();
	mxc_elcdif_blank_panel(FB_BLANK_UNBLANK);

	fbi->mode = (struct fb_videomode *)fb_match_mode(&fbi->var,
							 &fbi->modelist);
	data->var = fbi->var;

	return 0;
}
/*
 * open_getadapter_fib - allocate and register an AIF fib context
 * @dev: adapter the context is attached to
 * @arg: user pointer that receives the 32-bit context id
 *
 * Creates a new fib context, gives it an id that is unique on this
 * adapter, links it onto dev->fib_list and copies the id back to user
 * space so later GETNEXT/CLOSE ioctls can reference it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EFAULT if the
 * id could not be copied to user space (in which case the context is
 * unhooked and freed again, since the caller has no way to close it).
 */
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the mutex used to wait for the next AIF.
		 */
		sema_init(&fibctx->wait_sem, 0);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				/* Restart the scan with the bumped id. */
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			/*
			 * The caller never learned the context id, so it
			 * can never close this context: unhook and free it
			 * now instead of leaking it until adapter teardown.
			 */
			spin_lock_irqsave(&dev->fib_lock, flags);
			list_del(&fibctx->next);
			spin_unlock_irqrestore(&dev->fib_lock, flags);
			kfree(fibctx);
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
/******************************************************************************* ** ** gckGALDEVICE_Construct ** ** Constructor. ** ** INPUT: ** ** OUTPUT: ** ** gckGALDEVICE * Device ** Pointer to a variable receiving the gckGALDEVICE object pointer on ** success. */ gceSTATUS gckGALDEVICE_Construct( IN gctINT IrqLine, IN gctUINT32 RegisterMemBase, IN gctSIZE_T RegisterMemSize, IN gctINT IrqLine2D, IN gctUINT32 RegisterMemBase2D, IN gctSIZE_T RegisterMemSize2D, IN gctINT IrqLineVG, IN gctUINT32 RegisterMemBaseVG, IN gctSIZE_T RegisterMemSizeVG, IN gctUINT32 ContiguousBase, IN gctSIZE_T ContiguousSize, IN gctSIZE_T BankSize, IN gctINT FastClear, IN gctINT Compression, IN gctUINT32 PhysBaseAddr, IN gctUINT32 PhysSize, IN gctINT Signal, IN gctUINT LogFileSize, IN struct device *pdev, IN gctINT PowerManagement, OUT gckGALDEVICE *Device ) { gctUINT32 internalBaseAddress = 0, internalAlignment = 0; gctUINT32 externalBaseAddress = 0, externalAlignment = 0; gctUINT32 horizontalTileSize, verticalTileSize; struct resource* mem_region; gctUINT32 physAddr; gctUINT32 physical; gckGALDEVICE device; gceSTATUS status; gctINT32 i; gceHARDWARE_TYPE type; gckDB sharedDB = gcvNULL; gckKERNEL kernel = gcvNULL; gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u " "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u " "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u " "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu " "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d", IrqLine, RegisterMemBase, RegisterMemSize, IrqLine2D, RegisterMemBase2D, RegisterMemSize2D, IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG, ContiguousBase, ContiguousSize, BankSize, FastClear, Compression, PhysBaseAddr, PhysSize, Signal); /* Allocate device structure. 
*/ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN); if (!device) { gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY); } memset(device, 0, sizeof(struct _gckGALDEVICE)); device->dbgnode = gcvNULL; if(LogFileSize != 0) { if(gckDebugFileSystemCreateNode(LogFileSize,PARENT_FILE,DEBUG_FILE,&(device->dbgnode)) != 0) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to create the debug file system %s/%s \n", __FUNCTION__, __LINE__, PARENT_FILE, DEBUG_FILE ); } else { /*Everything is OK*/ gckDebugFileSystemSetCurrentNode(device->dbgnode); } } #ifdef CONFIG_PM /*Init runtime pm for gpu*/ pm_runtime_enable(pdev); device->pmdev = pdev; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) /*get gpu regulator*/ device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu"); if (IS_ERR(device->gpu_regulator)) { gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to get gpu regulator %s/%s \n", __FUNCTION__, __LINE__, PARENT_FILE, DEBUG_FILE); gcmkONERROR(gcvSTATUS_NOT_FOUND); } #endif /*Initialize the clock structure*/ if (IrqLine != -1) { device->clk_3d_core = clk_get(pdev, "gpu3d_clk"); if (!IS_ERR(device->clk_3d_core)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) if (cpu_is_mx6q()) { device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk"); if (IS_ERR(device->clk_3d_shader)) { IrqLine = -1; clk_put(device->clk_3d_core); device->clk_3d_core = NULL; device->clk_3d_shader = NULL; gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n"); } } #else device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk"); device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk"); if (IS_ERR(device->clk_3d_shader)) { IrqLine = -1; clk_put(device->clk_3d_core); device->clk_3d_core = NULL; device->clk_3d_shader = NULL; gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n"); } #endif } else { IrqLine = -1; device->clk_3d_core = NULL; gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n"); } } if ((IrqLine2D != -1) || (IrqLineVG 
!= -1)) { device->clk_2d_core = clk_get(pdev, "gpu2d_clk"); if (IS_ERR(device->clk_2d_core)) { IrqLine2D = -1; IrqLineVG = -1; device->clk_2d_core = NULL; gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n"); } else { if (IrqLine2D != -1) { device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk"); if (IS_ERR(device->clk_2d_axi)) { device->clk_2d_axi = NULL; IrqLine2D = -1; gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n"); } } if (IrqLineVG != -1) { device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk"); if (IS_ERR(device->clk_vg_axi)) { IrqLineVG = -1; device->clk_vg_axi = NULL; gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n"); } } } } if (IrqLine != -1) { device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase; device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize; } if (IrqLine2D != -1) { device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D; device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D; } if (IrqLineVG != -1) { device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG; device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG; } device->requestedContiguousBase = 0; device->requestedContiguousSize = 0; for (i = 0; i < gcdMAX_GPU_COUNT; i++) { physical = device->requestedRegisterMemBases[i]; /* Set up register memory region. 
*/ if (physical != 0) { mem_region = request_mem_region( physical, device->requestedRegisterMemSizes[i], "galcore register region" ); if (mem_region == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to claim %lu bytes @ 0x%08X\n", __FUNCTION__, __LINE__, physical, device->requestedRegisterMemSizes[i] ); gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->registerBases[i] = (gctPOINTER) ioremap_nocache( physical, device->requestedRegisterMemSizes[i]); if (device->registerBases[i] == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Unable to map %ld bytes @ 0x%08X\n", __FUNCTION__, __LINE__, physical, device->requestedRegisterMemSizes[i] ); gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } physical += device->requestedRegisterMemSizes[i]; } else { device->registerBases[i] = gcvNULL; } } /* Set the base address */ device->baseAddress = PhysBaseAddr; /* Construct the gckOS object. */ gcmkONERROR(gckOS_Construct(device, &device->os)); if (IrqLine != -1) { /* Construct the gckKERNEL object. */ gcmkONERROR(gckKERNEL_Construct( device->os, gcvCORE_MAJOR, device, gcvNULL, &device->kernels[gcvCORE_MAJOR])); sharedDB = device->kernels[gcvCORE_MAJOR]->db; /* Initialize core mapping */ for (i = 0; i < 8; i++) { device->coreMapping[i] = gcvCORE_MAJOR; } /* Setup the ISR manager. */ gcmkONERROR(gckHARDWARE_SetIsrManager( device->kernels[gcvCORE_MAJOR]->hardware, (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR, (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR, device )); gcmkONERROR(gckHARDWARE_SetFastClear( device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression )); gcmkONERROR(gckHARDWARE_SetPowerManagement( device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement )); #if COMMAND_PROCESSOR_VERSION == 1 /* Start the command queue. 
*/ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command)); #endif } else { device->kernels[gcvCORE_MAJOR] = gcvNULL; } if (IrqLine2D != -1) { gcmkONERROR(gckKERNEL_Construct( device->os, gcvCORE_2D, device, sharedDB, &device->kernels[gcvCORE_2D])); if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db; /* Verify the hardware type */ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type)); if (type != gcvHARDWARE_2D) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Unexpected hardware type: %d\n", __FUNCTION__, __LINE__, type ); gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } /* Initialize core mapping */ if (device->kernels[gcvCORE_MAJOR] == gcvNULL) { for (i = 0; i < 8; i++) { device->coreMapping[i] = gcvCORE_2D; } } else { device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D; } /* Setup the ISR manager. */ gcmkONERROR(gckHARDWARE_SetIsrManager( device->kernels[gcvCORE_2D]->hardware, (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D, (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D, device )); gcmkONERROR(gckHARDWARE_SetPowerManagement( device->kernels[gcvCORE_2D]->hardware, PowerManagement )); #if COMMAND_PROCESSOR_VERSION == 1 /* Start the command queue. */ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command)); #endif } else { device->kernels[gcvCORE_2D] = gcvNULL; } if (IrqLineVG != -1) { #if gcdENABLE_VG gcmkONERROR(gckKERNEL_Construct( device->os, gcvCORE_VG, device, sharedDB, &device->kernels[gcvCORE_VG])); /* Initialize core mapping */ if (device->kernels[gcvCORE_MAJOR] == gcvNULL && device->kernels[gcvCORE_2D] == gcvNULL ) { for (i = 0; i < 8; i++) { device->coreMapping[i] = gcvCORE_VG; } } else { device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG; } gcmkONERROR(gckVGHARDWARE_SetPowerManagement( device->kernels[gcvCORE_VG]->vg->hardware, PowerManagement )); #endif } else { device->kernels[gcvCORE_VG] = gcvNULL; } /* Initialize the ISR. 
*/ device->irqLines[gcvCORE_MAJOR] = IrqLine; device->irqLines[gcvCORE_2D] = IrqLine2D; device->irqLines[gcvCORE_VG] = IrqLineVG; /* Initialize the kernel thread semaphores. */ for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0); } device->signal = Signal; for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (device->kernels[i] != gcvNULL) break; } if (i == gcdMAX_GPU_COUNT) { gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT); } #if gcdENABLE_VG if (i == gcvCORE_VG) { /* Query the ceiling of the system memory. */ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory( device->kernels[i]->vg->hardware, &device->systemMemorySize, &device->systemMemoryBaseAddress )); /* query the amount of video memory */ gcmkONERROR(gckVGHARDWARE_QueryMemory( device->kernels[i]->vg->hardware, &device->internalSize, &internalBaseAddress, &internalAlignment, &device->externalSize, &externalBaseAddress, &externalAlignment, &horizontalTileSize, &verticalTileSize )); } else #endif { /* Query the ceiling of the system memory. */ gcmkONERROR(gckHARDWARE_QuerySystemMemory( device->kernels[i]->hardware, &device->systemMemorySize, &device->systemMemoryBaseAddress )); /* query the amount of video memory */ gcmkONERROR(gckHARDWARE_QueryMemory( device->kernels[i]->hardware, &device->internalSize, &internalBaseAddress, &internalAlignment, &device->externalSize, &externalBaseAddress, &externalAlignment, &horizontalTileSize, &verticalTileSize )); } /* Grab the first availiable kernel */ for (i = 0; i < gcdMAX_GPU_COUNT; i++) { if (device->irqLines[i] != -1) { kernel = device->kernels[i]; break; } } /* Set up the internal memory region. */ if (device->internalSize > 0) { status = gckVIDMEM_Construct( device->os, internalBaseAddress, device->internalSize, internalAlignment, 0, &device->internalVidMem ); if (gcmIS_ERROR(status)) { /* Error, disable internal heap. */ device->internalSize = 0; } else { /* Map internal memory. 
*/ device->internalLogical = (gctPOINTER) ioremap_nocache(physical, device->internalSize); if (device->internalLogical == gcvNULL) { gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical; device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical); physical += device->internalSize; } } if (device->externalSize > 0) { /* create the external memory heap */ status = gckVIDMEM_Construct( device->os, externalBaseAddress, device->externalSize, externalAlignment, 0, &device->externalVidMem ); if (gcmIS_ERROR(status)) { /* Error, disable internal heap. */ device->externalSize = 0; } else { /* Map external memory. */ device->externalLogical = (gctPOINTER) ioremap_nocache(physical, device->externalSize); if (device->externalLogical == gcvNULL) { gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical; device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical); physical += device->externalSize; } } /* set up the contiguous memory */ device->contiguousSize = ContiguousSize; if (ContiguousSize > 0) { if (ContiguousBase == 0) { while (device->contiguousSize > 0) { /* Allocate contiguous memory. 
*/ status = _AllocateMemory( device, device->contiguousSize, &device->contiguousBase, &device->contiguousPhysical, &physAddr ); if (gcmIS_SUCCESS(status)) { device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical); status = gckVIDMEM_Construct( device->os, physAddr | device->systemMemoryBaseAddress, device->contiguousSize, 64, BankSize, &device->contiguousVidMem ); if (gcmIS_SUCCESS(status)) { break; } gcmkONERROR(_FreeMemory( device, device->contiguousBase, device->contiguousPhysical )); gcmRELEASE_NAME(device->contiguousPhysicalName); device->contiguousBase = gcvNULL; device->contiguousPhysical = gcvNULL; } if (device->contiguousSize <= (4 << 20)) { device->contiguousSize = 0; } else { device->contiguousSize -= (4 << 20); } } } else { /* Create the contiguous memory heap. */ status = gckVIDMEM_Construct( device->os, ContiguousBase | device->systemMemoryBaseAddress, ContiguousSize, 64, BankSize, &device->contiguousVidMem ); if (gcmIS_ERROR(status)) { /* Error, disable contiguous memory pool. 
*/ device->contiguousVidMem = gcvNULL; device->contiguousSize = 0; } else { mem_region = request_mem_region( ContiguousBase, ContiguousSize, "galcore managed memory" ); if (mem_region == gcvNULL) { gcmkTRACE_ZONE( gcvLEVEL_ERROR, gcvZONE_DRIVER, "%s(%d): Failed to claim %ld bytes @ 0x%08X\n", __FUNCTION__, __LINE__, ContiguousSize, ContiguousBase ); gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } device->requestedContiguousBase = ContiguousBase; device->requestedContiguousSize = ContiguousSize; #if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG if (gcmIS_CORE_PRESENT(device, gcvCORE_VG)) { device->contiguousBase #if gcdPAGED_MEMORY_CACHEABLE = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize); #else = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize); #endif if (device->contiguousBase == gcvNULL) { device->contiguousVidMem = gcvNULL; device->contiguousSize = 0; gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES); } } #endif device->contiguousPhysical = gcvNULL; device->contiguousPhysicalName = 0; device->contiguousSize = ContiguousSize; device->contiguousMapped = gcvTRUE; } } }
/*
 * fiber_init - fiber driver module initialization
 *
 * Allocates (or registers the requested) char device region, initializes
 * the global fiber semaphore, maps the per-pair send/receive memory
 * windows and registers two cdevs (A and B side) for every fiber pair.
 *
 * On any cdev-setup failure the function now unwinds completely:
 * previously registered cdevs are deleted, all ioremap()ed windows are
 * unmapped and the chrdev region is released (the original code only
 * released the region, leaking cdevs and mappings).
 *
 * Return: 0 on success, negative errno on failure.
 * Author: Zhang Yansheng, 2014-03-03.
 */
static int __init fiber_init(void)
{
	int ret;
	dev_t devno = 0;
	int i = 0;
	int minor_a = 0;
	int minor_b = 0;

	if (fiber_major) {
		devno = MKDEV(fiber_major, 0);
		ret = register_chrdev_region(devno, FIBER_PAIR_DEVS_NR * 2, "fiber");
	} else {
		ret = alloc_chrdev_region(&devno, 0, FIBER_PAIR_DEVS_NR * 2, "fiber");
		fiber_major = MAJOR(devno);
	}
	if (ret < 0) {
		printk(KERN_WARNING "fiber: can't get major %d\n", fiber_major);
		return ret;
	}

	/* Initialize the fiber semaphore (serializes hardware access). */
	sema_init(&fiber_sem, 1);

	for (i = 0; i < FIBER_PAIR_DEVS_NR; i++) {
		minor_a = i * 2;
		minor_b = minor_a + 1;

		fiber_dev_a[i].send_sn = 0;
		/* Any value >= FIBER_SN_MAX means "no frame received yet". */
		fiber_dev_a[i].last_recv_sn = FIBER_SN_MAX;
		fiber_dev_a[i].recv_lock_port = FIBER_IO_RECV_LOCK_PORT_A;
		fiber_dev_a[i].recv_buff = fiber_recv_buf_a;
		fiber_dev_a[i].send_buff = fiber_send_buf;
		fiber_dev_a[i].recv_addr = ioremap(FIBER_RECV_START_ADDR_A, FIBER_RECV_TOTAL_MEMORY_SIZE_A);
		fiber_dev_a[i].send_addr = ioremap(FIBER_SEND_START_ADDR, FIBER_SEND_TOTAL_MEMORY_SIZE);
		fiber_dev_a[i].send_data.data_addr = NULL;
		fiber_dev_a[i].send_data.sn_addr = NULL;
		fiber_dev_a[i].recv_data.data_addr = NULL;
		fiber_dev_a[i].recv_data.sn_addr = NULL;
		fiber_dev_a[i].buf_size = 0;
		fiber_dev_a[i].peer_fiber_dev = &fiber_dev_b[i];

		fiber_dev_b[i].send_sn = 0;
		/* Any value >= FIBER_SN_MAX means "no frame received yet". */
		fiber_dev_b[i].last_recv_sn = FIBER_SN_MAX;
		fiber_dev_b[i].recv_lock_port = FIBER_IO_RECV_LOCK_PORT_B;
		fiber_dev_b[i].recv_buff = fiber_recv_buf_b;
		fiber_dev_b[i].send_buff = fiber_send_buf;
		fiber_dev_b[i].recv_addr = ioremap(FIBER_RECV_START_ADDR_B, FIBER_RECV_TOTAL_MEMORY_SIZE_B);
		fiber_dev_b[i].send_addr = ioremap(FIBER_SEND_START_ADDR, FIBER_SEND_TOTAL_MEMORY_SIZE);
		fiber_dev_b[i].send_data.data_addr = NULL;
		fiber_dev_b[i].send_data.sn_addr = NULL;
		fiber_dev_b[i].recv_data.data_addr = NULL;
		fiber_dev_b[i].recv_data.sn_addr = NULL;
		fiber_dev_b[i].buf_size = 0;
		fiber_dev_b[i].peer_fiber_dev = &fiber_dev_a[i];

		ret = fiber_setup_cdev(&fiber_dev_a[i].cdev, minor_a);
		if (0 > ret) {
			ret = -EFAULT;
			goto fail;
		}
		ret = fiber_setup_cdev(&fiber_dev_b[i].cdev, minor_b);
		if (0 > ret) {
			ret = -EFAULT;
			/* The A-side cdev of this pair is already live. */
			cdev_del(&fiber_dev_a[i].cdev);
			goto fail;
		}
	}

#ifdef FIBER_DEBUG
	/* Create the proc debug interface. */
	fiber_create_proc();
#endif /*FIBER_DEBUG*/

	printk(KERN_INFO "fiber init.\n");

	return 0;

fail:
	/* Unmap the windows of the pair that failed part-way through. */
	if (fiber_dev_a[i].recv_addr)
		iounmap(fiber_dev_a[i].recv_addr);
	if (fiber_dev_a[i].send_addr)
		iounmap(fiber_dev_a[i].send_addr);
	if (fiber_dev_b[i].recv_addr)
		iounmap(fiber_dev_b[i].recv_addr);
	if (fiber_dev_b[i].send_addr)
		iounmap(fiber_dev_b[i].send_addr);
	/* Tear down everything registered by earlier, complete pairs. */
	while (--i >= 0) {
		cdev_del(&fiber_dev_a[i].cdev);
		cdev_del(&fiber_dev_b[i].cdev);
		if (fiber_dev_a[i].recv_addr)
			iounmap(fiber_dev_a[i].recv_addr);
		if (fiber_dev_a[i].send_addr)
			iounmap(fiber_dev_a[i].send_addr);
		if (fiber_dev_b[i].recv_addr)
			iounmap(fiber_dev_b[i].recv_addr);
		if (fiber_dev_b[i].send_addr)
			iounmap(fiber_dev_b[i].send_addr);
	}
	/* Release the device number region. */
	unregister_chrdev_region(MKDEV(fiber_major, 0), FIBER_PAIR_DEVS_NR * 2);
	printk(KERN_ERR "fiber_init:failed\n");
	return ret;
}
/*
 * audio_ioctl - generic OSS ioctl dispatcher for this audio driver
 * @inode/@file: opened device node; file->private_data is the audio state
 * @cmd:  OSS ioctl number
 * @arg:  user-space argument pointer
 *
 * Handles the generic OSS commands here and forwards everything else to
 * the client driver via state->client_ioctl().  Trigger/pointer paths
 * touch DMA state with local IRQs disabled; the exact statement order in
 * those sections is significant.
 */
static int audio_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	audio_state_t *state = file->private_data;
	audio_stream_t *os = state->output_stream;
	audio_stream_t *is = state->input_stream;
	long val;

	DPRINTK(__FILE__ " audio_ioctl 0x%08x\n", cmd);

	/* dispatch based on command */
	switch (cmd) {
	case OSS_GETVERSION:
		return put_user(SOUND_VERSION, (int *)arg);

	case SNDCTL_DSP_GETBLKSIZE:
		/*
		 * NOTE(review): os/is are dereferenced here without the
		 * NULL guard that GETCAPS uses below — presumably the
		 * open() path guarantees the stream matching f_mode is
		 * non-NULL; confirm against the driver's open handler.
		 */
		if (file->f_mode & FMODE_WRITE)
			return put_user(os->fragsize, (int *)arg);
		else
			return put_user(is->fragsize, (int *)arg);

	case SNDCTL_DSP_GETCAPS:
		/* Duplex capability only when both streams exist. */
		val = DSP_CAP_REALTIME|DSP_CAP_TRIGGER|DSP_CAP_MMAP;
		if (is && os)
			val |= DSP_CAP_DUPLEX;
		return put_user(val, (int *)arg);

	case SNDCTL_DSP_SETFRAGMENT:
		/* Apply fragment sizing to each open direction and report
		 * the value actually used back to user space. */
		if (get_user(val, (long *) arg))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			int ret = audio_set_fragments(is, val);
			if (ret < 0)
				return ret;
			ret = put_user(ret, (int *)arg);
			if (ret)
				return ret;
		}
		if (file->f_mode & FMODE_WRITE) {
			int ret = audio_set_fragments(os, val);
			if (ret < 0)
				return ret;
			ret = put_user(ret, (int *)arg);
			if (ret)
				return ret;
		}
		return 0;

	case SNDCTL_DSP_SYNC:
		return audio_sync(file);

	case SNDCTL_DSP_SETDUPLEX:
		return 0;

	case SNDCTL_DSP_POST:
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		/* Report which directions are currently running. */
		val = 0;
		if (file->f_mode & FMODE_READ && is->active && !is->stopped)
			val |= PCM_ENABLE_INPUT;
		if (file->f_mode & FMODE_WRITE && os->active && !os->stopped)
			val |= PCM_ENABLE_OUTPUT;
		return put_user(val, (int *)arg);

	case SNDCTL_DSP_SETTRIGGER:
		if (get_user(val, (int *)arg))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			if (val & PCM_ENABLE_INPUT) {
				unsigned long flags;
				/* Lazily allocate buffers and prime the
				 * receive path before the first start. */
				if (!is->active) {
					if (!is->buffers && audio_setup_buf(is))
						return -ENOMEM;
					audio_prime_rx(state);
				}
				/* Kick DMA with IRQs off so the engine
				 * state cannot change under us. */
				local_irq_save(flags);
				is->stopped = 0;
				audio_process_dma(is);
				local_irq_restore(flags);
			} else {
				audio_stop_dma(is);
			}
		}
		if (file->f_mode & FMODE_WRITE) {
			if (val & PCM_ENABLE_OUTPUT) {
				unsigned long flags;
				if (!os->buffers && audio_setup_buf(os))
					return -ENOMEM;
				local_irq_save(flags);
				/* For mmap'ed output with nothing queued,
				 * mark every fragment pending and reset the
				 * fragment-counting semaphore to 0. */
				if (os->mapped && !os->pending_frags) {
					os->pending_frags = os->nbfrags;
					sema_init(&os->sem, 0);
					os->active = 1;
				}
				os->stopped = 0;
				audio_process_dma(os);
				local_irq_restore(flags);
			} else {
				audio_stop_dma(os);
			}
		}
		return 0;

	case SNDCTL_DSP_GETOPTR:
	case SNDCTL_DSP_GETIPTR:
	    {
		count_info inf = { 0, };
		audio_stream_t *s = (cmd == SNDCTL_DSP_GETOPTR) ? os : is;
		int bytecount, offset;
		unsigned long flags;

		/* The queried direction must actually be open. */
		if ((s == is && !(file->f_mode & FMODE_READ)) ||
		    (s == os && !(file->f_mode & FMODE_WRITE)))
			return -EINVAL;
		if (s->active) {
			/* Snapshot DMA position and consume the byte and
			 * fragment counters atomically w.r.t. the DMA IRQ. */
			local_irq_save(flags);
			offset = audio_get_dma_pos(s);
			inf.ptr = s->dma_tail * s->fragsize + offset;
			bytecount = s->bytecount + offset;
			/* Pre-compensate so the next IRQ accounting does
			 * not count the in-flight offset twice. */
			s->bytecount = -offset;
			inf.blocks = s->fragcount;
			s->fragcount = 0;
			local_irq_restore(flags);
			if (bytecount < 0)
				bytecount = 0;
			inf.bytes = bytecount;
		}
		return copy_to_user((void *)arg, &inf, sizeof(inf));
	    }

	case SNDCTL_DSP_GETOSPACE:
	case SNDCTL_DSP_GETISPACE:
	    {
		audio_buf_info inf = { 0, };
		audio_stream_t *s = (cmd == SNDCTL_DSP_GETOSPACE) ? os : is;

		if ((s == is && !(file->f_mode & FMODE_READ)) ||
		    (s == os && !(file->f_mode & FMODE_WRITE)))
			return -EINVAL;
		if (!s->buffers && audio_setup_buf(s))
			return -ENOMEM;
		/* Free space = free-fragment semaphore count * fragsize. */
		inf.bytes = atomic_read(&s->sem.count) * s->fragsize;
		/* inf.bytes -= s->buffers[s->usr_head].offset; */
		inf.fragments = inf.bytes / s->fragsize;
		inf.fragsize = s->fragsize;
		inf.fragstotal = s->nbfrags;
		return copy_to_user((void *)arg, &inf, sizeof(inf));
	    }

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_RESET:
		if (file->f_mode & FMODE_READ) {
			audio_reset(is);
			/* When RX needs a running TX, stop the output's
			 * idle spinning as well. */
			if (state->need_tx_for_rx) {
				unsigned long flags;
				local_irq_save(flags);
				os->spin_idle = 0;
				local_irq_restore(flags);
			}
		}
		if (file->f_mode & FMODE_WRITE) {
			audio_reset(os);
		}
		return 0;

	default:
		/*
		 * Let the client of this module handle the
		 * non generic ioctls
		 */
		return state->client_ioctl(inode, file, cmd, arg);
	}

	return 0;
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue: create the block queue, set up
 * discard support, allocate the scatterlists (optionally with a bounce
 * buffer for single-segment hosts) and start the queue thread.
 *
 * Return: 0 on success, negative errno on failure (all partially
 * allocated resources are released).
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	/* Honour the host controller's DMA addressing limit, if any. */
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		/* Advertise discard; granularity is the preferred erase size. */
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		mq->queue->limits.discard_granularity = card->pref_erase << 9;
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		/* Clamp the bounce buffer to what the host can handle. */
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				/* Non-fatal: fall back to the direct path. */
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			/* One sg entry covers the whole bounce buffer. */
			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	/* kfree(NULL) is a no-op, so no NULL guards are needed here. */
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
static int lcd_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_lcd *dev = NULL; struct usb_endpoint_descriptor *bulk_in, *bulk_out; int i; int retval; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; kref_init(&dev->kref); sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES); init_usb_anchor(&dev->submitted); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) { dev_warn(&interface->dev, "USBLCD model not supported.\n"); retval = -ENODEV; goto error; } /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ retval = usb_find_common_endpoints(interface->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); if (retval) { dev_err(&interface->dev, "Could not find both bulk-in and bulk-out endpoints\n"); goto error; } dev->bulk_in_size = usb_endpoint_maxp(bulk_in); dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress; dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL); if (!dev->bulk_in_buffer) { retval = -ENOMEM; goto error; } dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* we can register the device now, as it is ready */ retval = usb_register_dev(interface, &lcd_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } i = le16_to_cpu(dev->udev->descriptor.bcdDevice); dev_info(&interface->dev, "USBLCD Version %1d%1d.%1d%1d found " "at address %d\n", (i & 0xF000)>>12, (i & 0xF00)>>8, (i & 0xF0)>>4, (i & 0xF), dev->udev->devnum); /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "USB LCD device now attached to USBLCD-%d\n", interface->minor); 
return 0; error: kref_put(&dev->kref, lcd_delete); return retval; }