/*
 * projected_buffer_collect: walk every entry of a task's address map and
 * deallocate each one that is a projected (kernel-shared) buffer.
 *
 * map  - the task map to sweep; must not be VM_MAP_NULL or the kernel map.
 *
 * Returns KERN_INVALID_ARGUMENT for a null/kernel map, KERN_SUCCESS
 * otherwise.  The next pointer is captured before each deallocation
 * because projected_buffer_deallocate may remove the current entry.
 */
kern_return_t
projected_buffer_collect(vm_map_t map)
{
  vm_map_entry_t cur, follower;

  if (map == VM_MAP_NULL || map == kernel_map)
    return KERN_INVALID_ARGUMENT;

  cur = vm_map_first_entry(map);
  while (cur != vm_map_to_entry(map)) {
    follower = cur->vme_next;     /* capture before possible removal */
    if (cur->projected_on != 0)
      projected_buffer_deallocate(map, cur->vme_start, cur->vme_end);
    cur = follower;
  }

  return KERN_SUCCESS;
}
/*
 * mk_endpoint_deallocate_internal: release all of `task's state tied to
 * endpoint `ep' -- unmap the projected message buffer, unlink the task's
 * protection (pv) record and ownership record, abort any of the task's
 * threads waiting on the endpoint, and, when the last owner is gone,
 * close the connection (possibly blocking for the device) and free the
 * endpoint itself.
 *
 * ep       - endpoint index into hect[]/ect[].
 * task     - the task whose share of the endpoint is being released.
 * shutdown - historically suppressed waiter notification; the guards are
 *            commented out in this version and preserved as such.
 *
 * Returns NW_BAD_EP if ep is out of range or unallocated,
 * NW_PROT_VIOLATION if `task' does not own the endpoint,
 * NW_INCONSISTENCY on internal bookkeeping failures, NW_SUCCESS when
 * only this task's share was released, otherwise the result of the
 * final nc_endpoint_deallocate().
 */
nw_result
mk_endpoint_deallocate_internal(nw_ep ep, task_t task, boolean_t shutdown)
{
  nw_result rc;
  nw_pv_t pv, pv_previous;
  nw_ep_owned_t owned, owned_previous;
  nw_waiter_t w, w_previous, w_next;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    /* Locate this task's record on the endpoint's owner (pv) list. */
    pv_previous = NULL;
    while (pv != NULL && pv->owner != task) {
      pv_previous = pv;
      pv = pv->next;
    }
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else if (projected_buffer_deallocate(task->map, pv->buf_start,
                                           pv->buf_end) != KERN_SUCCESS) {
      rc = NW_INCONSISTENCY;
      printf("Endpoint deallocate: inconsistency p. buffer\n");
    } else {
      /* Unlink the pv record and return it to its free list. */
      if (pv_previous == NULL)
        hect[ep].pv = pv->next;
      else
        pv_previous->next = pv->next;
      pv->next = nw_free_pv;
      nw_free_pv = pv;

      /* Remove ep from the task's list of owned endpoints. */
      owned = task->nw_ep_owned;
      owned_previous = NULL;
      while (owned != NULL && owned->ep != ep) {
        owned_previous = owned;
        owned = owned->next;
      }
      if (owned == NULL) {
        rc = NW_INCONSISTENCY;
        printf("Endpoint deallocate: inconsistency owned\n");
      } else {
        if (owned_previous == NULL)
          task->nw_ep_owned = owned->next;
        else
          owned_previous->next = owned->next;
        owned->next = nw_free_waited;
        nw_free_waited = owned;

        /*
         * BUG FIX: rc was previously left uninitialized on the path
         * where other tasks still own the endpoint (hect[ep].pv !=
         * NULL below), so a garbage value was returned.  This task's
         * share has been released successfully at this point.
         */
        rc = NW_SUCCESS;

        /* Abort a signal waiter belonging to this task, if any. */
        if (hect[ep].sig_waiter != NULL &&
            hect[ep].sig_waiter->task == task) {
          /* if (!shutdown)*/
          mk_deliver_result(hect[ep].sig_waiter, NW_ABORTED);
          hect[ep].sig_waiter = NULL;
        }

        /*
         * Abort this task's receive waiters.
         * NOTE(review): this path delivers NULL where the tx path
         * below delivers NW_ABORTED -- looks asymmetric; preserved
         * as-is pending confirmation of the intended rx semantics.
         */
        w = hect[ep].rx_first;
        w_previous = NULL;
        while (w != NULL) {
          if (w->waiter->task == task) {
            /* if (!shutdown)*/
            mk_deliver_result(w->waiter, NULL);
            w_next = w->next;
            if (w_previous == NULL)
              hect[ep].rx_first = w_next;
            else
              w_previous->next = w_next;
            w->next = nw_free_waiter;
            nw_free_waiter = w;
            w = w_next;
          } else {
            w_previous = w;
            w = w->next;
          }
        }
        if (hect[ep].rx_first == NULL)
          hect[ep].rx_last = NULL;

        /* Abort this task's transmit waiters. */
        w = hect[ep].tx_first;
        w_previous = NULL;
        while (w != NULL) {
          if (w->waiter->task == task) {
            /* if (!shutdown)*/
            mk_deliver_result(w->waiter, NW_ABORTED);
            w_next = w->next;
            if (w_previous == NULL)
              hect[ep].tx_first = w_next;
            else
              w_previous->next = w_next;
            w->next = nw_free_waiter;
            nw_free_waiter = w;
            w = w_next;
          } else {
            w_previous = w;
            w = w->next;
          }
        }
        if (hect[ep].tx_first == NULL)
          hect[ep].tx_last = NULL;

        /*
         * Last owner gone: close the connection first if still
         * connected (NW_SYNCH means the device completes the close
         * asynchronously -- block until it signals us), then free
         * the endpoint slot itself.
         */
        if (hect[ep].pv == NULL) {
          if (ect[ep].state != NW_UNCONNECTED) {
            rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
                    close))(ep);
            if (rc == NW_SYNCH) {
              hect[ep].sig_waiter = current_thread();
              assert_wait(0, TRUE);
              simple_unlock(&nw_simple_lock);
              thread_block((void (*)()) 0);
            }
          }
          rc = nc_endpoint_deallocate(ep);
        }
      }
    }
  }
  nw_unlock();
  return rc;
}
/*
 * mk_endpoint_allocate_internal: allocate a network endpoint together with
 * a projected (kernel/user shared) message buffer in the current task's map.
 *
 * epp         - in/out: requested endpoint number (0 presumably means "any"
 *               -- TODO confirm against nc_endpoint_allocate); on success
 *               holds the endpoint actually allocated.
 * protocol    - network protocol for the endpoint.
 * accept      - acceptance mode passed through to nc_endpoint_allocate.
 * buffer_size - requested buffer size; 0 defaults to one 4KB page, any
 *               other value is rounded up to a 4KB-page multiple.
 * system      - if TRUE the endpoint gets no per-task pv record (no user
 *               protection/ownership tracking is set up).
 *
 * Returns NW_BAD_EP if the slot is out of range or already owned,
 * NW_NO_EP if bookkeeping free lists are exhausted, NW_NO_RESOURCES if
 * the projected buffer cannot be mapped, otherwise the result of
 * endpoint allocation (NW_SUCCESS on success).
 */
nw_result
mk_endpoint_allocate_internal(nw_ep_t epp, nw_protocol protocol,
                              nw_acceptance accept, u_int buffer_size,
                              boolean_t system)
{
  nw_result rc;
  u_int ep;
  vm_offset_t kernel_addr, user_addr;
  nw_pv_t pv;
  nw_ep_owned_t owned;

  ep = *epp;
  /* Round the buffer size up to whole 4KB pages; 0 means one page. */
  if (buffer_size == 0)
    buffer_size = 0x1000;
  else
    buffer_size = (buffer_size + 0xfff) & ~0xfff;
  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) != NULL) {
    /* Slot out of range, or already owned by some task. */
    rc = NW_BAD_EP;
  } else if (nw_free_pv == NULL || nw_free_waited == NULL) {
    /* No free bookkeeping records left. */
    rc = NW_NO_EP;
  } else if (projected_buffer_allocate(current_task()->map, buffer_size, 0,
                                       &kernel_addr, &user_addr,
                                       VM_PROT_READ | VM_PROT_WRITE,
                                       VM_INHERIT_NONE) != KERN_SUCCESS) {
    rc = NW_NO_RESOURCES;
  } else {
    rc = nc_endpoint_allocate(epp, protocol, accept,
                              (char *) kernel_addr, buffer_size);
    /*
     * NW_NO_EP with a nonzero *epp: the slot exists but is stale --
     * close its old connection (blocking if the device finishes the
     * close asynchronously), free it, refresh the line state, and
     * retry the allocation once.
     */
    if (rc == NW_NO_EP && (ep = *epp) != 0) {
      rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
              close))(ep);
      if (rc == NW_SYNCH) {
        hect[ep].sig_waiter = current_thread();
        assert_wait(0, TRUE);
        simple_unlock(&nw_simple_lock);
        thread_block((void (*)()) 0);
      }
      rc = nc_endpoint_deallocate(ep);
      if (rc == NW_SUCCESS) {
        nc_line_update(&ect[ep].conn->peer, 0);
        rc = nc_endpoint_allocate(epp, protocol, accept,
                                  (char *) kernel_addr, buffer_size);
      }
    }
    if (rc == NW_SUCCESS) {
      ep = *epp;
      if (system) {
        /* System endpoints carry no per-task protection record. */
        hect[ep].pv = NULL;
      } else {
        /* Record the current task as owner, with its user-space
           view of the buffer, from the pv free list. */
        hect[ep].pv = nw_free_pv;
        nw_free_pv = nw_free_pv->next;
        hect[ep].pv->owner = current_task();
        hect[ep].pv->buf_start = (char *) user_addr;
        hect[ep].pv->buf_end = (char *) user_addr + buffer_size;
        hect[ep].pv->next = NULL;
      }
      /* Fresh endpoint: no waiters of any kind yet. */
      hect[ep].sig_waiter = NULL;
      hect[ep].rx_first = NULL;
      hect[ep].rx_last = NULL;
      hect[ep].tx_first = NULL;
      hect[ep].tx_last = NULL;
      /* Link the endpoint onto the current task's owned list. */
      owned = nw_free_waited;
      nw_free_waited = nw_free_waited->next;
      owned->ep = ep;
      owned->next = current_task()->nw_ep_owned;
      current_task()->nw_ep_owned = owned;
    } else {
      /* Allocation failed after the buffer was mapped: unmap it. */
      projected_buffer_deallocate(current_task()->map, user_addr,
                                  user_addr + buffer_size);
    }
  }
  nw_unlock();
  return rc;
}