/*!	Tears down the interface: releases the device interface reference,
	uninitializes and frees all per-domain datalink objects, destroys the
	interface lock, and finally drops this interface's reference to the
	net stack module (which may unload the stack if nothing else holds it).
*/
Interface::~Interface()
{
	TRACE("Interface %p: destructor\n", this);

	put_device_interface(fDeviceInterface);

	// Uninitialize the domain datalink protocols
	DatalinkTable::Iterator iterator = fDatalinkTable.GetIterator();
	while (domain_datalink* datalink = iterator.Next()) {
		put_domain_datalink_protocols(this, datalink->domain);
	}

	// Free domain datalink objects. Clear(true) detaches all entries from
	// the table and hands back the head of the hash_link chain, which we
	// walk and delete manually (grab the next pointer before deleting).
	domain_datalink* next = fDatalinkTable.Clear(true);
	while (next != NULL) {
		domain_datalink* datalink = next;
		next = next->hash_link;

		delete datalink;
	}

	recursive_lock_destroy(&fLock);

	// Release reference of the stack - at this point, our stack may be unloaded
	// if no other interfaces or sockets are left
	put_module(gNetStackInterfaceModule.info.name);
}
/*!	Removes \a _domain from the list of known domains, deletes any
	interfaces still attached to it, and destroys the domain itself.
	Always succeeds.
*/
status_t
unregister_domain(net_domain* _domain)
{
	TRACE(("unregister_domain(%p, %d, %s)\n", _domain, _domain->family,
		_domain->name));

	net_domain_private* domain = (net_domain_private*)_domain;

	MutexLocker locker(sDomainLock);
	sDomains.Remove(domain);

	// Tear down every interface that is still attached to this domain
	while (void* item = list_remove_head_item(&domain->interfaces))
		delete_interface((net_interface_private*)item);

	recursive_lock_destroy(&domain->lock);
	delete domain;
	return B_OK;
}
/*!	Module teardown for launch_speedup: removes the syscall hook, deletes
	every Session still tracked by the hashes and the main prefetch list,
	and destroys the hashes and the global lock.
*/
static void
uninit()
{
	unregister_generic_syscall(LAUNCH_SPEEDUP_SYSCALLS, 1);

	recursive_lock_lock(&sLock);
	// NOTE(review): sLock is destroyed below while still held; presumably
	// fine at module teardown when no other thread can still contend for
	// it -- confirm against recursive_lock_destroy() semantics.

	// free all sessions from the hashes

	uint32 cookie = 0;
	Session *session;
	while ((session = (Session *)hash_remove_first(sTeamHash, &cookie)) != NULL) {
		delete session;
	}

	cookie = 0;
	while ((session = (Session *)hash_remove_first(sPrefetchHash, &cookie)) != NULL) {
		delete session;
	}

	// free all sessions from the main prefetch list; the list head is
	// advanced before each delete so we never touch a freed node
	for (session = sMainPrefetchSessions; session != NULL; ) {
		sMainPrefetchSessions = session->Next();
		delete session;
		session = sMainPrefetchSessions;
	}

	hash_uninit(sTeamHash);
	hash_uninit(sPrefetchHash);
	recursive_lock_destroy(&sLock);
}
/*!	Destroys the lock backing \a mutex. Recursively lockable mutexes
	(MTX_RECURSE flag set) wrap a recursive_lock, all others wrap a
	plain mutex.
*/
void
mtx_destroy(struct mtx *mutex)
{
	if ((mutex->type & MTX_RECURSE) == 0)
		mutex_destroy(&mutex->u.mutex.lock);
	else
		recursive_lock_destroy(&mutex->u.recursive);
}
/*!	Destroys the lock backing \a mutex, dispatching on its exact type:
	MTX_DEF mutexes wrap a plain mutex, MTX_RECURSE a recursive_lock.
	Any other type is a programming error and panics.
*/
void
mtx_destroy(struct mtx *mutex)
{
	switch (mutex->type) {
		case MTX_DEF:
			mutex_destroy(&mutex->u.mutex);
			break;
		case MTX_RECURSE:
			recursive_lock_destroy(&mutex->u.recursive);
			break;
		default:
			panic("Uh-oh, someone is pressing the wrong buttons");
	}
}
/*!	Shuts down the interfaces module: unregisters the debugger commands
	(when built with them) and destroys the module-global locks.
	Always returns B_OK.
*/
status_t
uninit_interfaces()
{
#if ENABLE_DEBUGGER_COMMANDS
	remove_debugger_command("net_interface", &dump_interface);
	remove_debugger_command("net_interfaces", &dump_interfaces);
	remove_debugger_command("net_local", &dump_local);
	remove_debugger_command("net_route", &dump_route);
#endif

	recursive_lock_destroy(&sLock);
	mutex_destroy(&sHashLock);
	return B_OK;
}
/*!	Removes \a _domain from the list of known domains and deletes it.
	The domain lock is destroyed before the object is freed; the global
	domain list lock is held for the whole operation. Always succeeds.
*/
status_t
unregister_domain(net_domain* _domain)
{
	TRACE(("unregister_domain(%p, %d, %s)\n", _domain, _domain->family,
		_domain->name));

	net_domain_private* privateDomain = (net_domain_private*)_domain;

	MutexLocker locker(sDomainLock);
	sDomains.Remove(privateDomain);

	recursive_lock_destroy(&privateDomain->lock);
	delete privateDomain;

	return B_OK;
}
/*!	Initializes \a new_map as a translation map. For a user map a fresh
	page directory page is allocated and the kernel half is copied in;
	for the kernel map the pre-allocated kernel page directory is reused
	and its user half is zeroed.
	Returns 0 on success, ERR_NO_MEMORY if the lock or arch data could
	not be created.
*/
int
vm_translation_map_create(vm_translation_map *new_map, bool kernel)
{
	ASSERT(new_map);

	// initialize the new object
	new_map->ops = &tmap_ops;
	new_map->map_count = 0;
	if(recursive_lock_create(&new_map->lock) < 0)
		return ERR_NO_MEMORY;

	new_map->arch_data = kmalloc(sizeof(vm_translation_map_arch_info));
	if(new_map->arch_data == NULL) {
		recursive_lock_destroy(&new_map->lock);
		return ERR_NO_MEMORY;
	}

	if (!kernel) {
		// user
		// NOTE(review): neither vm_page_allocate_page() nor
		// get_physical_page_tmap() results are checked here -- presumably
		// they cannot fail in this configuration; confirm.
		vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		list_add_head(&new_map->arch_data->pagetable_list, &page->queue_node);
		new_map->arch_data->pgdir_phys = page->ppn * PAGE_SIZE;
		get_physical_page_tmap(page->ppn * PAGE_SIZE,
			(addr_t *)&new_map->arch_data->pgdir_virt, PHYSICAL_PAGE_NO_WAIT);
		// copy the kernel bits into this one (one entry at the top)
		memcpy(new_map->arch_data->pgdir_virt + 256,
			(unsigned long *)kernel_pgdir_virt + 256,
			sizeof(unsigned long) * 256);
	} else {
		// kernel top level page dir is already allocated
		new_map->arch_data->pgdir_phys = kernel_pgdir_phys;
		new_map->arch_data->pgdir_virt = (unsigned long *)kernel_pgdir_virt;
		vm_page *page = vm_lookup_page(kernel_pgdir_phys / PAGE_SIZE);
		TMAP_TRACE("page %p, state %d\n", page, page->state);
		list_add_head(&new_map->arch_data->pagetable_list, &page->queue_node);
		// zero out the bottom of it, where user space mappings would go
		memset(new_map->arch_data->pgdir_virt, 0, sizeof(unsigned long) * 256);
		// XXX account for prexisting kernel page tables
	}

	return 0;
}
/*!	Module init for launch_speedup: creates the team and prefetch hashes
	and the global lock, registers the control syscall, loads the prefetch
	knowledge base, and starts the boot session.
	Returns B_OK on success; on failure every partially created resource
	is released in reverse order via the err labels.
*/
static status_t
init()
{
	sTeamHash = hash_init(64, Session::NextOffset(), &team_compare, &team_hash);
	if (sTeamHash == NULL)
		return B_NO_MEMORY;

	status_t status;

	sPrefetchHash = hash_init(64, Session::NextOffset(), &prefetch_compare,
		&prefetch_hash);
	if (sPrefetchHash == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	recursive_lock_init(&sLock, "launch speedup");

	// register kernel syscalls
	if (register_generic_syscall(LAUNCH_SPEEDUP_SYSCALLS,
			launch_speedup_control, 1, 0) != B_OK) {
		status = B_ERROR;
		goto err3;
	}

	// read in prefetch knowledge base
	mkdir("/etc/launch_cache", 0755);
	load_prefetch_data();

	// start boot session
	sMainSession = start_session(-1, -1, -1, "system boot");
	if (sMainSession == NULL) {
		// FIX: previously sMainSession was dereferenced unconditionally,
		// crashing here if start_session() failed
		status = B_NO_MEMORY;
		goto err4;
	}
	sMainSession->Unlock();
	dprintf("START BOOT %Ld\n", system_time());
	return B_OK;

err4:
	unregister_generic_syscall(LAUNCH_SPEEDUP_SYSCALLS, 1);
err3:
	recursive_lock_destroy(&sLock);
	hash_uninit(sPrefetchHash);
err1:
	hash_uninit(sTeamHash);
	return status;
}
/*!	Destroys the translation map's lock; subclasses release their own
	architecture-specific resources.
*/
VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}
/*!	Mounts the NTFS volume on \a device: allocates and initializes the
	nspace, reads the driver settings, mounts the volume via libntfs, and
	publishes the root vnode.
	On success \a *_rootID is set to FILE_root and B_NO_ERROR is returned;
	on failure every partially acquired resource (lock, ntfs volume, ns,
	root vnode) is released before returning an error code.
*/
status_t
fs_mount(fs_volume *_vol, const char *device, ulong flags, const char *args,
	ino_t *_rootID)
{
	nspace *ns;
	vnode *newNode = NULL;
	char lockname[32];
	void *handle;
	unsigned long mountFlags = 0;
	status_t result = B_NO_ERROR;

	ERRPRINT("fs_mount - ENTER\n");

	ns = ntfs_malloc(sizeof(nspace));
	if (!ns) {
		result = ENOMEM;
		goto exit;
	}

	*ns = (nspace) {
		.state = NF_FreeClustersOutdate | NF_FreeMFTOutdate,
		.show_sys_files = false,
		.ro = false,
		.flags = 0
	};

	strcpy(ns->devicePath, device);

	// NOTE: ns->id is only assigned below (from _vol->id), so at this
	// point it is still zero and the lock name does not yet identify
	// the volume.
	sprintf(lockname, "ntfs_lock %lx", ns->id);
	recursive_lock_init_etc(&(ns->vlock), lockname, MUTEX_FLAG_CLONE_NAME);

	// read mount options from the driver settings
	handle = load_driver_settings("ntfs");
	ns->show_sys_files = ! (strcasecmp(get_driver_parameter(handle,
		"hide_sys_files", "true", "true"), "true") == 0);
	ns->ro = strcasecmp(get_driver_parameter(handle, "read_only", "false",
		"false"), "false") != 0;
	ns->noatime = strcasecmp(get_driver_parameter(handle, "no_atime", "true",
		"true"), "true") == 0;
	unload_driver_settings(handle);

	if (ns->ro || (flags & B_MOUNT_READ_ONLY) != 0) {
		mountFlags |= MS_RDONLY;
		ns->flags |= B_FS_IS_READONLY;
	}

	// TODO: this does not take read-only volumes into account!
	ns->ntvol = utils_mount_volume(device, mountFlags, true);
	if (ns->ntvol == NULL) {
		// FIX: this error path used to leak ns and its lock
		result = errno;
		recursive_lock_destroy(&(ns->vlock));
		free(ns);
		goto exit;
	}

	*_rootID = FILE_root;
	ns->id = _vol->id;
	_vol->private_volume = (void *)ns;
	_vol->ops = &gNTFSVolumeOps;

	newNode = (vnode*)ntfs_calloc(sizeof(vnode));
	if (newNode == NULL) {
		// FIX: unmount and free on failure instead of leaking the
		// half-initialized volume
		result = ENOMEM;
		goto err;
	}

	newNode->vnid = *_rootID;
	newNode->parent_vnid = -1;

	result = publish_vnode(_vol, *_rootID, (void*)newNode, &gNTFSVnodeOps,
		S_IFDIR, 0);
	if (result != B_NO_ERROR) {
		// FIX: also release newNode, the ntfs volume and the lock here;
		// previously only ns itself was freed
		free(newNode);
		result = EINVAL;
		goto err;
	}

	result = B_NO_ERROR;
	ntfs_mark_free_space_outdated(ns);
	ntfs_calc_free_space(ns);
	goto exit;

err:
	ntfs_umount(ns->ntvol, true);
	recursive_lock_destroy(&(ns->vlock));
	// don't leave a dangling pointer behind in the volume
	_vol->private_volume = NULL;
	free(ns);

exit:
	ERRPRINT("fs_mount - EXIT, result code is %s\n", strerror(result));
	return result;
}


/*!	Unmounts the volume: releases the libntfs volume handle, destroys the
	volume lock and frees the nspace. Always returns B_NO_ERROR.
*/
status_t
fs_unmount(fs_volume *_vol)
{
	nspace *ns = (nspace*)_vol->private_volume;
	status_t result = B_NO_ERROR;

	ERRPRINT("fs_unmount - ENTER\n");

	ntfs_umount(ns->ntvol, true);
	recursive_lock_destroy(&(ns->vlock));
	free(ns);

	ERRPRINT("fs_unmount - EXIT, result is %s\n", strerror(result));
	return result;
}
/*!	Unregisters this service from the notification manager before the
	object goes away, then destroys the service lock.
*/
DefaultNotificationService::~DefaultNotificationService()
{
	NotificationManager::Manager().UnregisterService(*this);
	recursive_lock_destroy(&fLock);
}