// Fortran 90 binding for ats_init.
//
// Fortran passes every argument by reference, so the scalar counts arrive as
// pointers and are dereferenced here; the Fortran MPI communicator handle is
// converted to its C counterpart with MPI_Comm_f2c before forwarding.
int32_t ats_init_f90(MPI_Fint * comm, int32_t * type_ids, int32_t * num_type_ids, int32_t * num_types)
{
    MPI_Comm c_comm = MPI_Comm_f2c(*comm);

    return ats_init(c_comm, type_ids, *num_type_ids, *num_types);
} // ats_init_f90
NV_STATUS uvm_global_init(void) { NV_STATUS status; UvmPlatformInfo platform_info; // TODO: Bug 2094794: Several of these early goto errors are wrong, because // they'll call uvm_global_exit before intializing some state that // uvm_global_exit requires, like the global lock and thread contexts. status = uvm_kvmalloc_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_kvmalloc_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_thread_context_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_thread_context_init() failed: %s\n", nvstatusToString(status)); goto error; } uvm_mutex_init(&g_uvm_global.global_lock, UVM_LOCK_ORDER_GLOBAL); uvm_spin_lock_irqsave_init(&g_uvm_global.gpu_table_lock, UVM_LOCK_ORDER_LEAF); uvm_mutex_init(&g_uvm_global.va_spaces.lock, UVM_LOCK_ORDER_VA_SPACES_LIST); INIT_LIST_HEAD(&g_uvm_global.va_spaces.list); status = errno_to_nv_status(nv_kthread_q_init(&g_uvm_global.global_q, "UVM global queue")); if (status != NV_OK) { UVM_DBG_PRINT("nv_kthread_q_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_procfs_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_procfs_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_rm_locked_call(nvUvmInterfaceSessionCreate(&g_uvm_global.rm_session_handle, &platform_info)); if (status != NV_OK) { UVM_ERR_PRINT("nvUvmInterfaceSessionCreate() failed: %s\n", nvstatusToString(status)); return status; } ats_init(&platform_info); g_uvm_global.num_simulated_devices = 0; status = uvm_gpu_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_gpu_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_pmm_sysmem_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_pmm_sysmem_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_mmu_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_mmu_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_mem_global_init(); if (status != NV_OK) { 
UVM_ERR_PRINT("uvm_mem_gloal_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_va_range_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_va_range_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_range_group_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_range_group_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_migrate_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_migrate_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_perf_events_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_perf_events_init() failed: %s\n", nvstatusToString(status)); goto error; } status = uvm_perf_heuristics_init(); if (status != NV_OK) { UVM_ERR_PRINT("uvm_perf_heuristics_init() failed: %s\n", nvstatusToString(status)); goto error; } uvm_ats_ibm_init(); // This sets up the ISR (interrupt service routine), by hooking into RM's top-half ISR callback. As soon as this // call completes, GPU interrupts will start arriving, so it's important to be prepared to receive interrupts before // this point: status = uvm8_register_callbacks(); if (status != NV_OK) { UVM_ERR_PRINT("uvm8_register_callbacks failed: %s\n", nvstatusToString(status)); goto error; } return status; error: uvm_global_exit(); return status; }