/*
 * NAND write interface
 * ffp : NV flash file header (holds the mtd device)
 * off : logical offset within this file
 * len : length of data to write to flash, len <= mtd->erasesize
 * ptr : the data to be written
 */
u32 nv_mtd_write(struct nv_flash_file_header_stru* ffp, FSZ off, u32 len, u8* ptr)
{
    u32 ret;
    u32 offset = 0;     /* the incoming offset is a logical offset relative to the file header */
    struct mtd_info* mtd = ffp->mtd;

    ret = nv_sec_off_count(ffp, off, &offset);
    if (ret != NAND_OK)
    {
        nv_printf("%s\n", mtd->name);
        return ret;
    }

    if (NV_FILE_SYS_NV == ffp->flash_type)
    {
        ret = (u32)bsp_nand_write_dload((char*)mtd->name, offset, ptr, len);
    }
    else
    {
        ret = (u32)bsp_nand_write((char*)mtd->name, offset, ptr, len);
    }
    if (ret)
    {
        nv_printf("%s\n", mtd->name);
        return ret;
    }

    return ret;
}
int nv_gvi_kern_resume(
    struct pci_dev *dev
)
{
    nv_state_t *nv;
    nv_linux_state_t *lnv = NULL;
    nv_stack_t *sp = NULL;
    int status = RM_OK;

    nv_printf(NV_DBG_INFO, "NVGVI: Begin resuming GVI device!\n");

    lnv = pci_get_drvdata(dev);
    if ((!lnv) || (lnv->dev != dev))
    {
        nv_printf(NV_DBG_WARNINGS, "NVGVI: PM: invalid device!\n");
        return -1;
    }

    NV_KMEM_CACHE_ALLOC_STACK(sp);
    if (sp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVGVI: failed to allocate stack!\n");
        return -1;
    }

    nv = NV_STATE_PTR(lnv);
    status = rm_gvi_resume(sp, nv);
    if (status == RM_OK)
        nv->flags &= ~NV_FLAG_GVI_IN_SUSPEND;

    NV_KMEM_CACHE_FREE_STACK(sp);

    return status;
}
u32 bsp_nvm_init(void)
{
    u32 ret = NV_ERROR;
    struct nv_global_ddr_info_stru* ddr_info = (struct nv_global_ddr_info_stru*)NV_GLOBAL_INFO_ADDR;

#ifdef BSP_CONFIG_HI3630
    nv_printf("waiting for ap modem nv init ok .......\n");
    BSP_SYNC_Wait(SYNC_MODULE_NV, 0);
#endif

    nv_debug(NV_FUN_NVM_INIT, 0, 0, 0, 0);

    if (ddr_info->ccore_init_state < NV_BOOT_INIT_OK)
    {
        nv_printf("[%s]:pre init fail,break here!\n", __FUNCTION__);
        nv_debug(NV_FUN_NVM_INIT, 1, 0, 0, 0);
        /*lint -save -e801*/
        goto nv_init_fail;
        /*lint -restore*/
    }

    g_nv_ctrl.shared_addr = (u32)NV_GLOBAL_INFO_ADDR;
    spin_lock_init(&g_nv_ctrl.spinlock);

    ret = nv_icc_chan_init();
    if (ret)
    {
        nv_debug(NV_FUN_NVM_INIT, 2, ret, 0, 0);
        /*lint -save -e801*/
        goto nv_init_fail;
        /*lint -restore*/
    }

    osl_sem_init(1, &g_nv_ctrl.rw_sem);
    osl_sem_init(0, &g_nv_ctrl.cc_sem);

    ret = bsp_nvm_read(NV_ID_MSP_FLASH_LESS_MID_THRED, (u8*)&g_nv_ctrl.mid_prio, sizeof(u32));
    if (ret)
    {
        g_nv_ctrl.mid_prio = 20;
        nv_printf("read 0x%x error : 0x%x,use default count\n", NV_ID_MSP_FLASH_LESS_MID_THRED, ret);
    }

    ret = (u32)bsp_ipc_sem_create(IPC_SEM_NV_CRC);
    if (ret)
    {
        nv_debug(NV_FUN_KERNEL_INIT, 3, ret, 0, 0);
        /*lint -save -e801*/
        goto nv_init_fail;
        /*lint -restore*/
    }

    ddr_info->ccore_init_state = NV_INIT_OK;
    nv_printf("nv init ok !\n");
    INIT_LIST_HEAD(&g_nv_ctrl.stList);

    return NV_OK;

nv_init_fail:
    ddr_info->ccore_init_state = NV_INIT_FAIL;
    nv_printf("\n[%s]\n", __FUNCTION__);
    nv_help(NV_FUN_NVM_INIT);
    return ret;
}
u32 nv_crc_write_test04(void)
{
    struct nv_ref_data_info_stru nvArray[10] = {};
    u32 count = 0;
    u32 i = 0;
    u32 ret = 0;
    u8 *pOldNvData = NULL;
    u8 *pNewNvData = NULL;

    pOldNvData = (u8 *)nv_malloc(NV_MAX_UNIT_SIZE);
    pNewNvData = (u8 *)nv_malloc(NV_MAX_UNIT_SIZE);
    if ((pOldNvData == NULL) || (pNewNvData == NULL))
    {
        nv_printf("malloc error 1111\n");
        if (pOldNvData != NULL)
        {
            nv_free(pOldNvData);
        }
        if (pNewNvData != NULL)
        {
            nv_free(pNewNvData);
        }
        return NV_ERROR;
    }

    count = nv_test_find_edge_nv(nvArray);
    for (i = 0; i < count; i++)
    {
        /* read the original item, modify it, then write it back */
        ret = bsp_nvm_read(nvArray[i].itemid, (u8 *)pOldNvData, nvArray[i].nv_len);
        if (ret)
        {
            nv_printf("read error 2222, nvid = 0x%x\n", nvArray[i].itemid);
            goto error;
        }

        pOldNvData[0] += 2;
        ret = bsp_nvm_write(nvArray[i].itemid, (u8 *)pOldNvData, nvArray[i].nv_len);
        if (ret)
        {
            nv_printf("write error 3333, nvid = 0x%x\n", nvArray[i].itemid);
            goto error;
        }

        /* read the item back and verify the modification took effect */
        ret = bsp_nvm_read(nvArray[i].itemid, (u8 *)pNewNvData, nvArray[i].nv_len);
        if ((ret) || (pNewNvData[0] != pOldNvData[0]))
        {
            nv_printf("read error 4444, nvid = 0x%x\n", nvArray[i].itemid);
            ret = ret ? ret : NV_ERROR;     /* report a failure even when only the compare failed */
            goto error;
        }

        ret = nv_check_ddr_crc();
        if (ret)
        {
            nv_printf("crc error 5555, nvid = 0x%x\n", nvArray[i].itemid);
            goto error;
        }
    }

    nv_free(pOldNvData);
    nv_free(pNewNvData);

    return NV_OK;

error:
    /* free both buffers before returning on failure */
    nv_free(pOldNvData);
    nv_free(pNewNvData);
    return ret;
}
RM_STATUS NV_API_CALL os_registry_init(void)
{
    nv_parm_t *entry;
    unsigned int i;
    nv_stack_t *sp = NULL;

    NV_KMEM_CACHE_ALLOC_STACK(sp);
    if (sp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate stack!\n");
        return RM_ERR_NO_FREE_MEM;
    }

    if (NVreg_RmMsg != NULL)
    {
        rm_write_registry_string(sp, NULL, "NVreg", "RmMsg",
                                 NVreg_RmMsg, strlen(NVreg_RmMsg));
    }

    parse_option_string(sp);

    for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
    {
        rm_write_registry_dword(sp, NULL, entry->node, entry->name, *entry->data);
    }

    NV_KMEM_CACHE_FREE_STACK(sp);

    return RM_OK;
}
/*
 * (see above for additional information)
 *
 * If the 'at' usage count drops to zero with the updated logic, the
 * VMA's file pointer is saved; nv_kern_close() uses it to find
 * these allocations when the parent file descriptor is closed. This
 * will typically happen when the process exits.
 *
 * Since this is technically a workaround to handle possible fallout
 * from misbehaving clients, we additionally print a warning.
 */
static void
nv_kern_vma_release(struct vm_area_struct *vma)
{
    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    if (NV_VMA_PRIVATE(vma))
    {
        nv_alloc_t *at = (nv_alloc_t *) NV_VMA_PRIVATE(vma);

        if (NV_ATOMIC_DEC_AND_TEST(at->usage_count))
        {
            static int count = 0;
            if ((at->pid == os_get_current_process()) &&
                (count++ < NV_MAX_RECURRING_WARNING_MESSAGES))
            {
                nv_printf(NV_DBG_MEMINFO,
                    "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n",
                    __FUNCTION__, current->comm, at);
            }
            at->file = NV_VMA_FILE(vma);
        }

        if (!NV_ALLOC_MAPPING_AGP(at->flags))
        {
            NV_PRINT_AT(NV_DBG_MEMINFO, at);
            nv_vm_list_page_count(at->page_table, at->num_pages);
        }
    }
}
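/*
 * Illustrative sketch (not taken from this driver): the dec-and-test
 * reference-counting pattern the release handler above relies on, shown
 * with the standard Linux atomic API. The names obj/obj_put are
 * hypothetical; only the caller that drops the count to zero records the
 * file pointer for deferred cleanup.
 */
#include <linux/atomic.h>
#include <linux/fs.h>

struct obj
{
    atomic_t usage_count;
    struct file *file;      /* saved so a later close handler can find this allocation */
};

static void obj_put(struct obj *o, struct file *filp)
{
    /* atomic_dec_and_test() returns true only for the caller that drops
     * the count to zero, so exactly one path performs the deferred step */
    if (atomic_dec_and_test(&o->usage_count))
    {
        o->file = filp;
    }
}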
RM_STATUS KernAllocAGPPages(
    nv_stack_t *sp,
    nv_state_t *nv,
    NvU32 PageCount,
    void **pPriv_data,
    NvU32 *Offset
)
{
#ifndef AGPGART
    return RM_ERROR;
#else
    nv_linux_state_t *nvl;
    agp_memory *ptr;

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (rm_alloc_agp_bitmap(sp, nv, PageCount, Offset))
    {
        nv_printf(NV_DBG_INFO, "NVRM: AGPGART: failed to allocate AGP offset\n");
        return RM_ERROR;
    }

    ptr = NV_AGPGART_ALLOCATE_MEMORY(drm_agp_p, nvl->agp_bridge, PageCount, AGP_NORMAL_MEMORY);
    if (ptr == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: no pages available\n");
        rm_free_agp_bitmap(sp, nv, PageCount, *Offset);
        return RM_ERR_NO_FREE_MEM;
    }

    if (NV_AGPGART_BIND_MEMORY(drm_agp_p, ptr, *Offset))
    {
        nv_printf(NV_DBG_INFO, "NVRM: AGPGART: unable to bind %u pages\n", PageCount);
        NV_AGPGART_FREE_MEMORY(drm_agp_p, ptr);
        rm_free_agp_bitmap(sp, nv, PageCount, *Offset);
        return RM_ERROR;
    }

    *pPriv_data = (void *)ptr;

    return RM_OK;
#endif /* AGPGART */
}
/*
 * NAND write interface
 * fd  : NV eMMC file header (holds the mtd device)
 * off : logical offset within this file
 * len : length of data to write to flash, len <= mtd->erasesize
 * ptr : the data to be written
 */
u32 nv_mtd_write(struct nv_emmc_file_header_stru* fd, FSZ off, u32 len, u8* ptr)
{
    u32 ret;
    u32 offset = 0;     /* the incoming offset is a logical offset relative to the file header */
    struct mtd_info* mtd = fd->mtd;

    ret = nv_sec_off_count(fd, off, &offset);
    if (ret != NAND_OK)
    {
        nv_printf("%s\n", mtd->name);
        return ret;
    }

    ret = (u32)bsp_nand_write((char*)mtd->name, offset, ptr, len);
    if (ret)
    {
        nv_printf("%s\n", mtd->name);
        return ret;
    }

    return ret;
}
int nv_register_chrdev(int major, struct file_operations *fops)
{
    int status;

    status = register_chrdev(major, NV_DEV_NAME, fops);
    if (status < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: register_chrdev() failed!\n");
        return status;
    }

    return 0;
}
/* Write one NV item in a loop to verify that dual-core mutual exclusion works */
u32 nv_crc_write_test05(void)
{
    u32 ret = 0;
    u32 nvid = 0xD007;
    u32 data = 0;
    u32 i = 0;

    for (i = 0; i < 1000; i++)
    {
        printf("\n****************test round %d start**************************\n", i);
        ret = bsp_nvm_read(nvid, (u8 *)&data, sizeof(u32));
        if (ret)
        {
            nv_printf("read fail ,ret = 0x%x\n", ret);
            return ret;
        }

        data++;
        printf("\n****************test round %d start 11111**************************\n", i);
        DelayMs(g_crc_delay_ctrl, 0);

        ret = bsp_nvm_write(nvid, (u8 *)&data, sizeof(u32));
        if (ret)
        {
            nv_printf("write fail ,ret = 0x%x\n", ret);
            return ret;
        }
    }

    printf("\n****************test round %d start 2222**************************\n", i);
    data = 20;
    ret = bsp_nvm_write(nvid, (u8 *)&data, sizeof(u32));
    if (ret)
    {
        nv_printf("write fail 22222,ret = 0x%x\n", ret);
        return ret;
    }

    return NV_OK;
}
u32 nv_write_test_08(u32 itemid)
{
    struct nv_file_list_info_stru file_info = {};
    struct nv_ref_data_info_stru ref_info = {};
    u8* pData = NULL;
    u32 ret = 0;

    pData = (u8*)nv_malloc(2 * 2048);
    if (NULL == pData)
    {
        nv_printf("alloc error\n");
        return NV_ERROR;
    }

    ret = nv_search_byid(itemid, (u8 *)NV_GLOBAL_CTRL_INFO_ADDR, &ref_info, &file_info);
    if (ret)
    {
        nv_printf("nv_search_byid error\n");
        nv_free(pData);
        return ret;
    }

    ret = bsp_nvm_read(itemid, pData, ref_info.nv_len);
    if (ret)
    {
        nv_printf("bsp_nvm_read error, ret = 0x%x 1111\n", ret);
        nv_free(pData);     /* free the buffer on the error path as well */
        return ret;
    }

    pData[0]++;
    ret = bsp_nvm_write(itemid, pData, ref_info.nv_len);
    if (ret)
    {
        nv_printf("bsp_nvm_write error, ret = 0x%x 2222\n", ret);
        nv_free(pData);
        return ret;
    }

    nv_free(pData);

    return NV_OK;
}
u32 bsp_nvm_flushEx(u32 off, u32 len, u32 itemid)
{
    u32 ret = NV_ERROR;
    struct nv_icc_stru icc_req = {0};
    struct nv_icc_stru icc_cnf = {0};
    struct nv_global_ddr_info_stru* ddr_info = (struct nv_global_ddr_info_stru*)NV_GLOBAL_INFO_ADDR;

    nv_debug(NV_API_FLUSH, 0, 0, 0, 0);

    if (NV_INIT_OK != ddr_info->ccore_init_state)
    {
        nv_debug(NV_API_FLUSH, 1, ddr_info->ccore_init_state, 0, 0);
        return BSP_ERR_NV_MEM_INIT_FAIL;
    }

    /* build the ICC request and send it to the peer core */
    icc_req.msg_type = NV_ICC_REQ;
    icc_req.data_len = len;
    icc_req.data_off = off;
    icc_req.ret      = 93;              /* used for testing only; no meaning */
    icc_req.itemid   = itemid;
    icc_req.slice    = bsp_get_slice_value();

    ret = nv_icc_send((u8*)&icc_req, sizeof(icc_req));
    if (ret)
    {
        nv_debug(NV_API_FLUSH, 2, 0, ret, 0);
        return ret;
    }

    /* wait for the confirmation from the peer core */
    /*lint -save -e534*/
    if (osl_sem_downtimeout(&g_nv_ctrl.cc_sem, NV_MAX_WAIT_TICK))
    {
        nv_printf("down time out\n");
    }
    /*lint -restore +e534*/

    memcpy(&icc_cnf, g_nv_ctrl.nv_icc_buf, sizeof(icc_cnf));
    if (icc_cnf.msg_type != NV_ICC_CNF)
    {
        nv_debug(NV_API_FLUSH, 3, 0, 0, icc_cnf.msg_type);
        /*lint -save -e515 -e516*/
        printf_nv("\n[%s]\n", __FUNCTION__);
        /*lint -restore*/
        nv_help(NV_API_FLUSH);
        return BSP_ERR_NV_INVALID_PARAM;
    }

    return icc_cnf.ret;
}
int nv_gvi_kern_suspend(
    struct pci_dev *dev,
    pm_message_t state
)
{
    nv_state_t *nv;
    nv_linux_state_t *lnv = NULL;
    int status = RM_OK;
    nv_stack_t *sp = NULL;

    nv_printf(NV_DBG_INFO, "NVGVI: Begin suspending GVI device!\n");

    lnv = pci_get_drvdata(dev);
    if ((!lnv) || (lnv->dev != dev))
    {
        nv_printf(NV_DBG_WARNINGS, "NVGVI: PM: invalid device!\n");
        return -1;
    }

    NV_KMEM_CACHE_ALLOC_STACK(sp);
    if (sp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVGVI: failed to allocate stack!\n");
        return -1;
    }

    nv = NV_STATE_PTR(lnv);
    status = rm_shutdown_gvi_device(sp, nv);
    if (status != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVGVI: failed to stop gvi!\n");
        goto failed;
    }

    nv->flags |= NV_FLAG_GVI_IN_SUSPEND;
    NV_TASKQUEUE_FLUSH();

    status = rm_gvi_suspend(sp, nv);
    if (status != 0)
    {
        nv->flags &= ~NV_FLAG_GVI_IN_SUSPEND;
        nv_printf(NV_DBG_ERRORS, "NVGVI: failed to suspend gvi!\n");
        goto failed;
    }

    nv_printf(NV_DBG_INFO, "NVGVI: End suspending GVI device!\n");

failed:
    /* the success path also falls through to here: the stack is freed in both cases */
    NV_KMEM_CACHE_FREE_STACK(sp);
    return status;
}
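/*
 * Illustrative wiring only (the struct name nv_gvi_pci_driver and the
 * .name string are assumptions, not taken from this file): how the GVI
 * suspend/resume callbacks above would typically be hooked into a
 * legacy-PM pci_driver.
 */
static struct pci_driver nv_gvi_pci_driver = {
    .name    = "nvidia-gvi",
    .suspend = nv_gvi_kern_suspend,     /* (struct pci_dev *, pm_message_t) */
    .resume  = nv_gvi_kern_resume,      /* (struct pci_dev *) */
};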
u32 nv_test_sem_func(void)
{
    u32 start = 0;
    u32 end = 0;

    if (0 == nv_sem_ctrl)
    {
        osl_sem_init(0, &nv_test_sem);
        nv_sem_ctrl = 1;
    }

    start = bsp_get_slice_value();
    if (osl_sem_downtimeout(&nv_test_sem, 10))
    {
        /* timed out: record how many slices the wait actually took */
        end = bsp_get_slice_value();
    }
    nv_printf("slice: 0x%x\n", end - start);

    return 0;
}
RM_STATUS NV_API_CALL os_registry_init(void)
{
    nv_parm_t *entry;
    unsigned int i;
    nv_stack_t *sp = NULL;

    NV_KMEM_CACHE_ALLOC_STACK(sp);
    if (sp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate stack!\n");
        return RM_ERR_NO_MEMORY;
    }

    if (NVreg_RmMsg != NULL)
    {
        rm_write_registry_string(sp, NULL, "NVreg", "RmMsg",
                                 NVreg_RmMsg, strlen(NVreg_RmMsg));
    }

    memset(&nv_assign_gpu_pci_info, 0, sizeof(nv_assign_gpu_pci_info));

#if !defined(NV_VMWARE)
    if (parse_assign_gpus_string())
    {
        rm_write_registry_string(sp, NULL, "NVreg", NV_REG_ASSIGN_GPUS,
                                 NVreg_AssignGpus, strlen(NVreg_AssignGpus));
    }
#endif

    parse_option_string(sp);

    detect_virtualization_and_apply_defaults(sp);

    for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
    {
        rm_write_registry_dword(sp, NULL, entry->node, entry->name, *entry->data);
    }

    NV_KMEM_CACHE_FREE_STACK(sp);

    return RM_OK;
}
static int nv_i2c_algo_master_xfer(
    struct i2c_adapter *adap,
    struct i2c_msg msgs[],
    int num
)
{
    nv_state_t *nv = (nv_state_t *)adap->algo_data;
    unsigned int i = 0;
    int rc = -EIO;
    RM_STATUS rmStatus = RM_OK;
    nv_stack_t *sp = NULL;

    NV_KMEM_CACHE_ALLOC_STACK(sp);
    if (sp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate stack!\n");
        return -ENOMEM;
    }

    for (i = 0; ((i < (unsigned int)num) && (rmStatus == RM_OK)); i++)
    {
        if (msgs[i].flags & ~I2C_M_RD)
        {
            /* we don't support I2C_FUNC_10BIT_ADDR, I2C_FUNC_PROTOCOL_MANGLING */
            rc = -EINVAL;
            rmStatus = RM_ERR_INVALID_ARGUMENT;
        }
        else if (msgs[i].flags & I2C_M_RD)
        {
            rmStatus = rm_i2c_read_buffer(sp, nv, (void *)adap,
                                          (NvU8)(msgs[i].addr & 0x7f),
                                          (NvU32)(msgs[i].len & 0xffffUL),
                                          (NvU8 *)msgs[i].buf);
        }
        else
        {
            rmStatus = rm_i2c_write_buffer(sp, nv, (void *)adap,
                                           (NvU8)(msgs[i].addr & 0x7f),
                                           (NvU32)(msgs[i].len & 0xffffUL),
                                           (NvU8 *)msgs[i].buf);
        }
    }

    NV_KMEM_CACHE_FREE_STACK(sp);

    return (rmStatus != RM_OK) ? rc : num;
}
static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
    nv_linux_state_t *nvl;
    unsigned int count;

    LOCK_NV_LINUX_DEVICES();

    count = 0;

    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        nv_state_t *nv = NV_STATE_PTR(nvl);

        /*
         * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there
         * are more GPUs than that.
         */
        if (count >= NV_MAX_GPUS)
        {
            nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.",
                      NV_MAX_GPUS);
            count = 0;
            break;
        }

        gpu_info[count].gpu_id = nv->gpu_id;

        gpu_info[count].pci_info.domain   = nv->pci_info.domain;
        gpu_info[count].pci_info.bus      = nv->pci_info.bus;
        gpu_info[count].pci_info.slot     = nv->pci_info.slot;
        gpu_info[count].pci_info.function = nv->pci_info.function;

        gpu_info[count].os_dev_ptr = nvl->dev;

        count++;
    }

    UNLOCK_NV_LINUX_DEVICES();

    return count;
}
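/*
 * Usage sketch (caller side only; the function and variable names below
 * are assumptions, not taken from this file): the caller supplies an
 * array sized for NV_MAX_GPUS entries and receives the number of entries
 * filled in, with 0 signalling either no devices or an overflow.
 */
static void example_enumerate(void)
{
    nv_gpu_info_t gpu_info[NV_MAX_GPUS];
    NvU32 gpu_count = nvidia_modeset_enumerate_gpus(gpu_info);
    NvU32 i;

    for (i = 0; i < gpu_count; i++)
    {
        nv_printf(NV_DBG_INFO, "NVRM: GPU id 0x%x at %04x:%02x:%02x.%x\n",
                  gpu_info[i].gpu_id,
                  gpu_info[i].pci_info.domain,
                  gpu_info[i].pci_info.bus,
                  gpu_info[i].pci_info.slot,
                  gpu_info[i].pci_info.function);
    }
}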
RM_STATUS KernTeardownAGP(
    nv_stack_t *sp,
    nv_state_t *nv
)
{
#ifndef AGPGART
    return RM_ERROR;
#else
    RM_STATUS status;
    nv_linux_state_t *nvl;
    void *bitmap;

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

#ifdef CONFIG_MTRR
    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
        mtrr_del(-1, nv->agp.address, nv->agp.size);
#endif

    NV_AGPGART_BACKEND_RELEASE(drm_agp_p, nvl->agp_bridge);

#if defined(KERNEL_2_4)
    inter_module_put("drm_agp");
#endif

    status = rm_clear_agp_bitmap(sp, nv, &bitmap);
    if (status != RM_OK)
    {
        nv_printf(NV_DBG_WARNINGS, "NVRM: AGPGART: failed to clear bitmap\n");
        return status;
    }

    os_free_mem(bitmap);

    return RM_OK;
#endif /* AGPGART */
}
s32 nv_file_copy(s8* dst_path, s8* src_path, bool path)
{
    u32 ret = NV_ERROR;
    FILE* dst_fp = NULL;
    FILE* src_fp = NULL;
    u32 u_ulen;         /* length of one copy chunk */
    u32 u_tlen;         /* total length of the source file */
    void* pdata;        /* temporary buffer for the copy */

    src_fp = nv_file_open(src_path, NV_FILE_READ);
    dst_fp = nv_file_open(dst_path, NV_FILE_WRITE);
    if (!src_fp || !dst_fp)
    {
        nv_printf("open fail src :%p,dst :%p\n", src_fp, dst_fp);
        /* close whichever file did open before returning */
        if (src_fp)
        {
            (void)nv_file_close(src_fp);
        }
        if (dst_fp)
        {
            (void)nv_file_close(dst_fp);
        }
        return BSP_ERR_NV_NO_FILE;
    }

    u_tlen = nv_get_file_len(src_fp);
    if (u_tlen >= NV_MAX_FILE_SIZE)
    {
        nv_printf("u_tlen :0x%x\n", u_tlen);
        goto out;
    }

    pdata = (void*)nv_malloc(NV_FILE_COPY_UNIT_SIZE);   /* copy chunk size is 16 KB */
    if (!pdata)
    {
        nv_printf("malloc failed !\n");
        goto out;
    }

    while (u_tlen)
    {
        /* copy at most one chunk per iteration, the remainder on the last pass */
        u_ulen = u_tlen > NV_FILE_COPY_UNIT_SIZE ? NV_FILE_COPY_UNIT_SIZE : u_tlen;

        ret = (u32)nv_file_read(pdata, 1, u_ulen, src_fp);
        if (ret != u_ulen)
        {
            nv_printf("ret :0x%x u_ulen: 0x%x\n", ret, u_ulen);
            goto out1;
        }

        ret = (u32)nv_file_write(pdata, 1, u_ulen, dst_fp);
        if (ret != u_ulen)
        {
            nv_printf("ret :0x%x u_ulen: 0x%x\n", ret, u_ulen);
            goto out1;
        }

        u_tlen -= u_ulen;
    }

    (void)nv_file_close(src_fp);
    (void)nv_file_close(dst_fp);
    nv_free(pdata);

    return NV_OK;

out1:
    nv_free(pdata);
out:
    (void)nv_file_close(src_fp);
    (void)nv_file_close(dst_fp);
    return -1;
}
static NvBool parse_assign_gpus_string(void)
{
    char *option_string = NULL;
    char *ptr, *token;

    if (NVreg_AssignGpus == NULL)
    {
        return NV_FALSE;
    }

    if ((option_string = remove_spaces(NVreg_AssignGpus)) == NULL)
    {
        return NV_FALSE;
    }

    ptr = option_string;

    // token string should be in formats:
    // bus:slot
    // domain:bus:slot
    // domain:bus:slot.func
    while ((token = strsep(&ptr, ",")) != NULL)
    {
        char *pci_info, *p, *q, *r, *func = token;
        NvU32 domain, bus, slot;

        if (!strlen(token))
        {
            continue;
        }

        if ((pci_info = strsep(&func, ".")) != NULL)
        {
            // PCI device can have maximum 8 functions and for GPUs, function
            // field is always 0
            if ((func != NULL) && ((*func != '0') || (strlen(func) > 1)))
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: NVreg_AssignGpus: Invalid PCI function in token %s\n",
                          token);
                continue;
            }

            domain = simple_strtoul(pci_info, &p, 16);

            if ((p == NULL) || (*p != ':') || (*(p + 1) == '\0'))
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: NVreg_AssignGpus: Invalid PCI domain/bus in token %s\n",
                          token);
                continue;
            }

            bus = simple_strtoul((p + 1), &q, 16);

            if (q == NULL)
            {
                nv_printf(NV_DBG_ERRORS,
                          "NVRM: NVreg_AssignGpus: Invalid PCI bus/slot in token %s\n",
                          token);
                continue;
            }

            if (*q != '\0')
            {
                if ((*q != ':') || (*(q + 1) == '\0'))
                {
                    nv_printf(NV_DBG_ERRORS,
                              "NVRM: NVreg_AssignGpus: Invalid PCI slot in token %s\n",
                              token);
                    continue;
                }

                slot = (NvU32)simple_strtoul(q + 1, &r, 16);
                if ((slot == 0) && ((q + 1) == r))
                {
                    nv_printf(NV_DBG_ERRORS,
                              "NVRM: NVreg_AssignGpus: Invalid PCI slot in token %s\n",
                              token);
                    continue;
                }

                nv_assign_gpu_pci_info[nv_assign_gpu_count].domain = domain;
                nv_assign_gpu_pci_info[nv_assign_gpu_count].bus    = bus;
                nv_assign_gpu_pci_info[nv_assign_gpu_count].slot   = slot;
            }
            else
            {
                nv_assign_gpu_pci_info[nv_assign_gpu_count].domain = 0;
                nv_assign_gpu_pci_info[nv_assign_gpu_count].bus    = domain;
                nv_assign_gpu_pci_info[nv_assign_gpu_count].slot   = bus;
            }

            nv_assign_gpu_count++;

            if (nv_assign_gpu_count == NV_MAX_DEVICES)
                break;
        }
    }

    os_free_mem(option_string);

    return (nv_assign_gpu_count ? NV_TRUE : NV_FALSE);
}
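/*
 * Illustrative only (the values below are made up): token strings that the
 * parser above accepts in NVreg_AssignGpus, comma-separated, with all
 * fields in hexadecimal.
 *
 *   "1:0"      -> bus 0x01, slot 0x00 (domain defaults to 0)
 *   "0:1:0"    -> domain 0x0000, bus 0x01, slot 0x00
 *   "0:2:0.0"  -> domain 0x0000, bus 0x02, slot 0x00, function 0
 *
 * For example, passed as a module option (assumed invocation):
 *   modprobe nvidia NVreg_AssignGpus="0:1:0,0:2:0.0"
 */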
RM_STATUS KernInitAGP(
    nv_stack_t *sp,
    nv_state_t *nv,
    NvU64 *ap_phys_base,
    NvU64 *ap_limit
)
{
#ifndef AGPGART
    return RM_ERROR;
#else
    RM_STATUS status = RM_ERROR;
    nv_linux_state_t *nvl;
    void *bitmap;
    agp_kern_info agp_info;
    NvU32 bitmap_size;
    NvU32 agp_rate = (8 | 4 | 2 | 1);
    NvU32 enable_sba = 0;
    NvU32 enable_fw = 0;
    NvU32 agp_mode = 0;

#if defined(KERNEL_2_4)
    if (!(drm_agp_p = inter_module_get_request("drm_agp", "agpgart")))
        return RM_ERR_NOT_SUPPORTED;
#endif

    /* NOTE: from here down, return an error code of '-1'
     * that indicates that agpgart is loaded, but we failed to use it
     * in some way. This is so we don't try to use nvagp and lock up
     * the memory controller. */

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (NV_AGPGART_BACKEND_ACQUIRE(drm_agp_p, nvl->agp_bridge, nvl->dev))
    {
        nv_printf(NV_DBG_INFO, "NVRM: AGPGART: no backend available\n");
        status = RM_ERR_NOT_SUPPORTED;
        goto bailout;
    }

    if (NV_AGPGART_COPY_INFO(drm_agp_p, nvl->agp_bridge, &agp_info))
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: AGPGART: kernel reports chipset as unsupported\n");
        goto release;
    }

    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
    {
#ifdef CONFIG_MTRR
        /*
         * Failure to set a write-combining range on the AGP aperture may
         * be due to the presence of other memory ranges with conflicting
         * caching attributes. Play safe and fail AGP initialization.
         */
        if (mtrr_add(agp_info.aper_base, agp_info.aper_size << 20,
                     MTRR_TYPE_WRCOMB, 0) < 0)
#endif
        {
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: AGPGART: unable to set MTRR write-combining\n");
            goto release;
        }
    }

    // allocate and set the bitmap for tracking agp allocations
    bitmap_size = (agp_info.aper_size << 20) / PAGE_SIZE / 8;
    if (os_alloc_mem(&bitmap, bitmap_size))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: unable to allocate bitmap\n");
        goto failed;
    }

    os_mem_set(bitmap, 0xff, bitmap_size);
    status = rm_set_agp_bitmap(sp, nv, bitmap);

    if (status != RM_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: unable to set bitmap\n");
        os_free_mem(bitmap);
        goto failed;
    }

    agp_mode = agp_info.mode;
    rm_read_registry_dword(sp, NULL, "NVreg", "ReqAGPRate", &agp_rate);
    agp_mode = NV_AGPGART_MODE_BITS_RATE(agp_mode, agp_rate);
    agp_mode |= 1;      /* avoid 0x mode request */

    /* if a higher rate bit is set, drop the lower ones */
    if (agp_mode & 2)
        agp_mode &= ~1;
    if (agp_mode & 4)
        agp_mode &= ~2;

    rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPSBA", &enable_sba);
    agp_mode |= NV_AGPGART_MODE_BITS_SBA(enable_sba);

    rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPFW", &enable_fw);
    agp_mode |= NV_AGPGART_MODE_BITS_FW(enable_fw);

    agp_info.mode &= (0xff000000 | agp_mode);
    NV_AGPGART_BACKEND_ENABLE(drm_agp_p, nvl->agp_bridge, agp_info.mode);

    *ap_phys_base = (unsigned)agp_info.aper_base;
    *ap_limit = (unsigned)((agp_info.aper_size << 20) - 1);

    return RM_OK;

failed:
#ifdef CONFIG_MTRR
    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
        mtrr_del(-1, agp_info.aper_base, agp_info.aper_size << 20);
#endif
release:
    NV_AGPGART_BACKEND_RELEASE(drm_agp_p, nvl->agp_bridge);
bailout:
#if defined(KERNEL_2_4)
    inter_module_put("drm_agp");
#endif
    return status;
#endif /* AGPGART */
}
static int nv_i2c_algo_smbus_xfer(
    struct i2c_adapter *adap,
    u16 addr,
    unsigned short flags,
    char read_write,
    u8 command,
    int size,
    union i2c_smbus_data *data
)
{
    nv_state_t *nv = (nv_state_t *)adap->algo_data;
    int rc = -EIO;
    RM_STATUS rmStatus = RM_OK;
    nv_stack_t *sp = NULL;

    NV_KMEM_CACHE_ALLOC_STACK(sp);
    if (sp == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate stack!\n");
        return -ENOMEM;
    }

    switch (size)
    {
        case I2C_SMBUS_QUICK:
            rmStatus = rm_i2c_smbus_write_quick(sp, nv, (void *)adap,
                                                (NvU8)(addr & 0x7f),
                                                (read_write == I2C_SMBUS_READ));
            break;

        case I2C_SMBUS_BYTE:
            if (read_write == I2C_SMBUS_READ)
            {
                rmStatus = rm_i2c_read_buffer(sp, nv, (void *)adap,
                                              (NvU8)(addr & 0x7f),
                                              1, /* single byte transfer */
                                              (NvU8 *)&data->byte);
            }
            else
            {
                u8 data = command;
                rmStatus = rm_i2c_write_buffer(sp, nv, (void *)adap,
                                               (NvU8)(addr & 0x7f),
                                               1, /* single byte transfer */
                                               (NvU8 *)&data);
            }
            break;

        case I2C_SMBUS_BYTE_DATA:
            if (read_write == I2C_SMBUS_READ)
            {
                rmStatus = rm_i2c_smbus_read_buffer(sp, nv, (void *)adap,
                                                    (NvU8)(addr & 0x7f),
                                                    (NvU8)command,
                                                    1, /* single byte transfer */
                                                    (NvU8 *)&data->byte);
            }
            else
            {
                rmStatus = rm_i2c_smbus_write_buffer(sp, nv, (void *)adap,
                                                     (NvU8)(addr & 0x7f),
                                                     (NvU8)command,
                                                     1, /* single byte transfer */
                                                     (NvU8 *)&data->byte);
            }
            break;

        case I2C_SMBUS_WORD_DATA:
            if (read_write == I2C_SMBUS_READ)
            {
                rmStatus = rm_i2c_smbus_read_buffer(sp, nv, (void *)adap,
                                                    (NvU8)(addr & 0x7f),
                                                    (NvU8)command,
                                                    2, /* single word transfer */
                                                    (NvU8 *)&data->block[1]);
                data->word = ((NvU16)data->block[1]) |
                             ((NvU16)data->block[2] << 8);
            }
            else
            {
                u16 word = data->word;
                data->block[1] = (word & 0xff);
                data->block[2] = (word >> 8);
                rmStatus = rm_i2c_smbus_write_buffer(sp, nv, (void *)adap,
                                                     (NvU8)(addr & 0x7f),
                                                     (NvU8)command,
                                                     2, /* single word transfer */
                                                     (NvU8 *)&data->block[1]);
            }
            break;

        default:
            rc = -EINVAL;
            rmStatus = RM_ERR_INVALID_ARGUMENT;
    }

    NV_KMEM_CACHE_FREE_STACK(sp);

    return (rmStatus != RM_OK) ? rc : 0;
}
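/*
 * Illustrative wiring only (nv_i2c_algo and nv_i2c_algo_functionality are
 * assumed names, not taken from this file): how the two transfer callbacks
 * above would typically be registered with the Linux I2C core through a
 * struct i2c_algorithm, with a functionality mask matching the SMBus sizes
 * handled in the switch statement.
 */
static u32 nv_i2c_algo_functionality(struct i2c_adapter *adap)
{
    return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
           I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA;
}

static const struct i2c_algorithm nv_i2c_algo = {
    .master_xfer   = nv_i2c_algo_master_xfer,
    .smbus_xfer    = nv_i2c_algo_smbus_xfer,
    .functionality = nv_i2c_algo_functionality,
};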