/* Release every media sample owned by this allocator and free the single
 * VirtualAlloc'd buffer backing them.
 * Returns S_OK, or an HRESULT mapped from GetLastError() when the backing
 * memory cannot be released. */
static HRESULT StdMemAllocator_Free(IMemAllocator * iface)
{
    StdMemAllocator *This = StdMemAllocator_from_IMemAllocator(iface);
    struct list * cursor;

    /* Outstanding (in-use) samples mean a client leaked references.
     * Detach them from the allocator instead of deleting them, so a late
     * Release on such a sample does not touch freed allocator state. */
    if (!list_empty(&This->base.used_list))
    {
        WARN("Freeing allocator with outstanding samples!\n");
        while ((cursor = list_head(&This->base.used_list)) != NULL)
        {
            StdMediaSample2 *pSample;
            list_remove(cursor);
            pSample = LIST_ENTRY(cursor, StdMediaSample2, listentry);
            pSample->pParent = NULL;  /* orphan the sample */
        }
    }

    /* Destroy all samples parked on the free list. */
    while ((cursor = list_head(&This->base.free_list)) != NULL)
    {
        list_remove(cursor);
        StdMediaSample2_Delete(LIST_ENTRY(cursor, StdMediaSample2, listentry));
    }

    /* free memory */
    if (!VirtualFree(This->pMemory, 0, MEM_RELEASE))
    {
        ERR("Couldn't free memory. Error: %u\n", GetLastError());
        return HRESULT_FROM_WIN32(GetLastError());
    }

    return S_OK;
}
/* Return the history entry at 1-based position `index`, or NULL when
 * index is 0 or beyond g_history_size + 1.
 * The lookup uses whichever end of the list is closer: when the mirrored
 * position is nearer it delegates to get_history_entry_at(), otherwise it
 * walks backwards from the tail.
 * NOTE(review): which end counts as position 1 depends on
 * get_history_entry_at()'s direction -- confirm against its definition. */
t_history_entry *get_history_entry_from(uint64_t index)
{
    uint64_t reverse_index;
    t_history_entry *entry;
    t_list *pos;
    uint64_t i;

    /* Valid positions are 1 .. g_history_size + 1. */
    if (!index || index > g_history_size + 1)
        return (NULL);
    /* Same position counted from the opposite end of the list. */
    reverse_index = (g_history_size + 1) - index;
    entry = NULL;
    if (reverse_index < index)
    {
        /* Target is closer to the other end -- delegate to the helper
         * that walks from that side. */
        return (get_history_entry_at(reverse_index));
    }
    else
    {
        /* Walk backwards from the tail until the index-th node. */
        pos = (&(g_history->list))->prev;
        i = 1;
        while (pos != &(g_history->list) && i < index)
        {
            i++;
            pos = pos->prev;
        }
        entry = LIST_ENTRY(pos, t_history_entry, list);
    }
    return (entry);
}
// Destructor: tear down every client still registered with the app.
LogApp::~LogApp()
{
    // Drain the client list, deleting each client as it is unlinked.
    for (;;) {
        if (clientList.isEmpty())
            break;
        delete LIST_ENTRY(clientList.removeHead(), Client, list);
    }
}
void release_typelib(void) { dispex_data_t *iter; unsigned i; while(!list_empty(&dispex_data_list)) { iter = LIST_ENTRY(list_head(&dispex_data_list), dispex_data_t, entry); list_remove(&iter->entry); for(i=0; i < iter->func_cnt; i++) SysFreeString(iter->funcs[i].name); heap_free(iter->funcs); heap_free(iter->name_table); heap_free(iter); } for(i=0; i < ARRAY_SIZE(typeinfos); i++) if(typeinfos[i]) ITypeInfo_Release(typeinfos[i]); for(i=0; i < ARRAY_SIZE(typelib); i++) if(typelib[i]) ITypeLib_Release(typelib[i]); DeleteCriticalSection(&cs_dispex_static_data); }
/* Run every pending HSR (deferred service routine) whose bit is set in
 * hsr_bitmap.  Does nothing while the scheduler is already locked. */
void handle_pending_hsrs(void)
{
    extern int32_t sched_lock;
    int nr;
    list_head_t *list, *node;
    hsr_t *hsr;

    if (sched_lock > 0)
        return;

    /* just only to prevent hisrs to schedule */
    ++sched_lock;
    while (1) {
        /* Lowest pending HSR number; negative means none left. */
        nr = HAL_FIND_FIRST_SET(hsr_bitmap);
        if (nr < 0)
            break;
        list = hsr_array + nr;
        node = LIST_FIRST(list);
        BUG_ON(NULL == node);
        hsr = LIST_ENTRY(node, hsr_t, node);
        /* The handler itself runs with interrupts enabled. */
        hsr->function(hsr->data);
        /* Count/list/bitmap bookkeeping must not race with the IRQ path
         * that queues HSRs, hence the interrupt-disabled window. */
        HAL_DISABLE_INTERRUPTS();
        --hsr->count;
        if (hsr->count <= 0)
            LIST_DEL(node);
        if (LIST_EMPTY(list))
            hsr_bitmap &= ~(1 << nr);
        HAL_ENABLE_INTERRUPTS();
    }
    --sched_lock;
}
/* Free all cached dispex function data, then drop the cached typeinfo
 * references and the typelib itself (skipped entirely when the typelib
 * was never loaded, since the typeinfos are only cached from it). */
void release_typelib(void)
{
    dispex_data_t *iter;
    unsigned i;

    while(!list_empty(&dispex_data_list)) {
        iter = LIST_ENTRY(list_head(&dispex_data_list), dispex_data_t, entry);
        list_remove(&iter->entry);

        /* Each cached function carries an owned BSTR name. */
        for(i=0; i < iter->func_cnt; i++)
            SysFreeString(iter->funcs[i].name);
        heap_free(iter->funcs);
        heap_free(iter->name_table);
        heap_free(iter);
    }

    if(!typelib)
        return;

    for(i=0; i < sizeof(typeinfos)/sizeof(*typeinfos); i++)
        if(typeinfos[i])
            ITypeInfo_Release(typeinfos[i]);

    ITypeLib_Release(typelib);
}
/* IMemAllocator::GetBuffer: hand out a free media sample, blocking on the
 * free-sample semaphore unless AM_GBF_NOWAIT is set. */
static HRESULT WINAPI BaseMemAllocator_GetBuffer(IMemAllocator * iface, IMediaSample ** pSample, REFERENCE_TIME *pStartTime, REFERENCE_TIME *pEndTime, DWORD dwFlags)
{
    BaseMemAllocator *This = impl_from_IMemAllocator(iface);
    HRESULT hr = S_OK;

    /* NOTE: The pStartTime and pEndTime parameters are not applied to the sample.
     * The allocator might use these values to determine which buffer it retrieves */

    TRACE("(%p)->(%p, %p, %p, %x)\n", This, pSample, pStartTime, pEndTime, dwFlags);

    *pSample = NULL;

    /* Register as a waiter under the lock so Decommit can account for us. */
    EnterCriticalSection(This->pCritSect);
    if (!This->bCommitted || This->bDecommitQueued)
    {
        WARN("Not committed\n");
        hr = VFW_E_NOT_COMMITTED;
    }
    else
        ++This->lWaiting;
    LeaveCriticalSection(This->pCritSect);
    if (FAILED(hr))
        return hr;

    /* With AM_GBF_NOWAIT the semaphore is only polled (0 timeout). */
    if (WaitForSingleObject(This->hSemWaiting, (dwFlags & AM_GBF_NOWAIT) ? 0 : INFINITE) != WAIT_OBJECT_0)
    {
        EnterCriticalSection(This->pCritSect);
        --This->lWaiting;
        LeaveCriticalSection(This->pCritSect);
        WARN("Timed out\n");
        return VFW_E_TIMEOUT;
    }

    EnterCriticalSection(This->pCritSect);
    {
        --This->lWaiting;
        /* Re-check state: Decommit may have run while we were waiting. */
        if (!This->bCommitted)
            hr = VFW_E_NOT_COMMITTED;
        else if (This->bDecommitQueued)
            hr = VFW_E_TIMEOUT;
        else
        {
            StdMediaSample2 *ms;
            /* Move the first free sample onto the used list and hand it
             * out with a fresh reference. */
            struct list * free = list_head(&This->free_list);
            list_remove(free);
            list_add_head(&This->used_list, free);
            ms = LIST_ENTRY(free, StdMediaSample2, listentry);
            assert(ms->ref == 0);
            *pSample = (IMediaSample *)&ms->IMediaSample2_iface;
            IMediaSample_AddRef(*pSample);
        }
    }
    LeaveCriticalSection(This->pCritSect);

    if (hr != S_OK)
        WARN("%08x\n", hr);

    return hr;
}
/* Advance the iterator and return the next element's payload.
 * Returns NULL when iter is NULL, when the iterator is not positioned on
 * an element, or when the end of the used list has been reached.
 *
 * Fix: also guard iter->element before dereferencing it -- an iterator
 * that was never successfully primed (e.g. first() on an empty list
 * returns early without touching it) must not be dereferenced. */
char *array_list_next(ARRAY_LIST_ITERATOR *iter)
{
    if(iter == NULL || iter->element == NULL ||
       iter->element->list.next == &iter->alist->used_list) {
        return NULL;
    }
    iter->element = LIST_ENTRY(iter->element->list.next, ARRAY_LIST_ELEMENT, list);
    return iter->element->data;
}
// 说明:获取下一个就就绪任务 // 返回:如果没有,返回NULL static TASK* TaskNextReady(void) { // 如果存在下一个就绪任务 if (!ListEmpty(&list_ready_tasks) && list_ready_tasks.next != ¤t_task->list_ready) { return LIST_ENTRY(list_ready_tasks.next, TASK, list_ready); } return NULL; }
// Look up a session by user name in one hash bucket (linear scan over the
// bucket's list; returns the first exact strcmp match).
// NOTE(review): the visible text ends inside the loop's closing brace --
// the function's closing brace and its "not found" return path appear to
// be truncated here.
inline ClientSession *SessionHash::get(ListHead *list, const char *name)
{
    ListHead *pos;
    LIST_FOR_EACH(pos, list) {
        ClientSession *s = LIST_ENTRY(pos, ClientSession, hashItem);
        if (strcmp(s->userName, name) == 0)
            return s;
    }
/**
 * Get the total size of a directory tree.
 * @param dir directory path
 * @return directory size
 */
int64_t AnalogFS::ftwsize(const char *dir)
{
    struct stat64 st;          // per-entry stat buffer (64-bit sizes)
    struct dirent *dirt;       // current directory entry
    DIR *dirs;                 // handle of the directory being read
    char tpath[MAX_PATHLEN];   // scratch buffer for building child paths
    int64_t sumsize;           // accumulated byte count
    /* BSD <sys/queue.h>-style worklist of directories still to visit. */
    LIST_HEAD(listhead, entry) head;
    struct entry {
        LIST_ENTRY(entry) entries;  /* list linkage */
        void *data;                 /* payload (pending path) -- TODO confirm */
    } *item;
    /* NOTE(review): the function body continues beyond the visible text. */
/* Find the per-guest PCI device record matching guest_id in the global
 * guest_pci_devices list.
 * NOTE(review): the visible text ends right after the search loop -- the
 * return statement (presumably using guest_found / guest_devices) is
 * truncated here. */
static GUEST_PCI_DEVICES* find_guest_devices(GUEST_ID guest_id)
{
    GUEST_PCI_DEVICES *guest_devices = NULL;
    LIST_ELEMENT *guest_iter = NULL;
    BOOLEAN guest_found = FALSE;

    LIST_FOR_EACH(guest_pci_devices, guest_iter) {
        guest_devices = LIST_ENTRY(guest_iter, GUEST_PCI_DEVICES, guests);
        if(guest_devices->guest_id == guest_id) {
            guest_found = TRUE;
            break;
        }
    }
/* IUnknown::Release for a Linux force-feedback effect.  On the last
 * release the effect is stopped, unloaded from the device, unlinked from
 * the effect list, and both the list node and the object are freed. */
static ULONG WINAPI LinuxInputEffectImpl_Release(LPDIRECTINPUTEFFECT iface)
{
    LinuxInputEffectImpl *This = impl_from_IDirectInputEffect(iface);
    ULONG refcount = InterlockedDecrement(&(This->ref));

    if (refcount)
        return refcount;

    /* Last reference gone: tear everything down. */
    LinuxInputEffectImpl_Stop(iface);
    LinuxInputEffectImpl_Unload(iface);

    /* Resolve the owning list node before anything is freed. */
    {
        effect_list_item *node = LIST_ENTRY(This->entry, effect_list_item, entry);
        list_remove(This->entry);
        HeapFree(GetProcessHeap(), 0, node);
    }
    HeapFree(GetProcessHeap(), 0, This);
    return 0;
}
/* Release every per-thread TLS record on tls_list and free the TLS slot.
 * A no-op when the TLS index was never allocated. */
static void free_tls_list(void)
{
    if(urlmon_tls == TLS_OUT_OF_INDEXES)
        return;

    for(;;) {
        tls_data_t *data;

        if(list_empty(&tls_list))
            break;
        data = LIST_ENTRY(list_head(&tls_list), tls_data_t, entry);
        list_remove(&data->entry);
        heap_free(data);
    }

    TlsFree(urlmon_tls);
}
/* Find the task with the given id on the suspended-task list and remove
 * it; does nothing when no task matches. */
void TaskDelSuspudByID(int32_t task_id)
{
    LIST *p, *head;
    TASK *task;

    p = head = &list_suspud_tasks;
    while (p->next != head) {
        p = p->next;
        task = LIST_ENTRY(p, TASK, list_suspud);
        if (task->task_id == task_id) { // found
            /* NOTE(review): this walks the *suspended* list but calls
             * TaskDelReady(); the sibling TaskStart() uses TaskDelSuspud()
             * for the same list. Confirm TaskDelReady is really intended
             * here and not a copy/paste slip. */
            TaskDelReady(task);
            break;
        }
    }
}
// 说明:任务启动 void TaskStart(int task_id) { TASK *task = NULL; LIST *head = &list_suspud_tasks; LIST *p = head; while (p->next != head) { p = p->next; task = LIST_ENTRY(p, TASK, list_suspud); if (task->task_id == task_id) { TaskDelSuspud(task); TaskAddReady(task); TaskSchedule(); return; } } }
/* Return the payload of the first used element of the array list and,
 * when iter is supplied, prime it for array_list_next().
 * Returns NULL when alist is NULL or the list holds no elements.
 *
 * Fix: guard alist before dereferencing it, consistent with the argument
 * checking attempted elsewhere in this API. */
char *array_list_first(ARRAY_LIST_HANDLE alist, ARRAY_LIST_ITERATOR *iter)
{
    ARRAY_LIST_ELEMENT *element;
    char* data;

    if(alist == NULL || list_is_empty(&alist->used_list)) {
        return NULL;
    }
    element = LIST_ENTRY(alist->used_list.next, ARRAY_LIST_ELEMENT, list);
    data = element->data;
    if(iter != NULL) {
        iter->alist = alist;
        iter->element = element;
    }
    return data;
}
// for testing int32_t TaskNumReady() { TASK *task; int32_t num = 0; LIST *head, *p; head = &list_ready_tasks; p = head; while (p->next != head) { num++; p = p->next; task = LIST_ENTRY(p, TASK, list_ready); TEE_Printf("%d : tid:0x%x name:%s\n", num, task->task_id, task->task_name); } return num; }
/* Legacy IMemAllocator::GetBuffer: hand out a free media sample, waiting
 * on hSemWaiting unless AM_GBF_NOWAIT is given.
 * NOTE(review): bCommitted is tested and lWaiting is updated OUTSIDE
 * csState here, while the list manipulation below is inside it -- confirm
 * callers serialize GetBuffer against Commit/Decommit, otherwise these
 * accesses race. */
static HRESULT WINAPI BaseMemAllocator_GetBuffer(IMemAllocator * iface, IMediaSample ** pSample, REFERENCE_TIME *pStartTime, REFERENCE_TIME *pEndTime, DWORD dwFlags)
{
    BaseMemAllocator *This = (BaseMemAllocator *)iface;
    HRESULT hr = S_OK;

    /* NOTE: The pStartTime and pEndTime parameters are not applied to the sample.
     * The allocator might use these values to determine which buffer it retrieves */

    TRACE("(%p)->(%p, %p, %p, %lx)\n", This, pSample, pStartTime, pEndTime, dwFlags);

    *pSample = NULL;

    if (!This->bCommitted)
        return VFW_E_NOT_COMMITTED;

    This->lWaiting++;

    /* With AM_GBF_NOWAIT the semaphore is only polled (0 timeout). */
    if (WaitForSingleObject(This->hSemWaiting, (dwFlags & AM_GBF_NOWAIT) ? 0 : INFINITE) != WAIT_OBJECT_0)
    {
        This->lWaiting--;
        return VFW_E_TIMEOUT;
    }

    This->lWaiting--;

    EnterCriticalSection(&This->csState);
    {
        /* Re-check state: Decommit may have run while we were waiting. */
        if (!This->bCommitted)
            hr = VFW_E_NOT_COMMITTED;
        else if (This->bDecommitQueued)
            hr = VFW_E_TIMEOUT;
        else
        {
            /* Move the first free sample to the used list and return it
             * with a fresh reference. */
            struct list * free = list_head(&This->free_list);
            list_remove(free);
            list_add_head(&This->used_list, free);
            *pSample = (IMediaSample *)LIST_ENTRY(free, StdMediaSample2, listentry);
            assert(((StdMediaSample2 *)*pSample)->ref == 0);
            IMediaSample_AddRef(*pSample);
        }
    }
    LeaveCriticalSection(&This->csState);

    return hr;
}
/* Append a copy of *data (alist->element_size bytes) to the array list.
 * Returns FALSE when alist or data is NULL, or when no free slot remains.
 *
 * Fix: the NULL checks now run BEFORE the first dereference -- the old
 * order evaluated list_is_empty(&alist->free_list) first, dereferencing a
 * possibly-NULL handle before testing `alist == NULL`. */
BOOLEAN array_list_add(ARRAY_LIST_HANDLE alist, void* data)
{
    LIST_ELEMENT *free_element = NULL;
    ARRAY_LIST_ELEMENT *free_list_entry = NULL;

    if(alist == NULL || data == NULL || list_is_empty(&alist->free_list)) {
        return FALSE;
    }

    /* Move a node from the free list to the tail of the used list. */
    free_element = alist->free_list.next;
    list_remove(free_element);
    list_add(alist->used_list.prev, free_element);
    alist->num_of_used_elements++;

    /* Copy the caller's payload into the element's fixed-size slot. */
    free_list_entry = LIST_ENTRY(free_element, ARRAY_LIST_ELEMENT, list);
    vmm_memcpy(free_list_entry->data, data, alist->element_size);
    return TRUE;
}
/* Script shutdown: run each object's terminator, then force-release every
 * object still attached to the context. */
void collect_objects(script_ctx_t *ctx)
{
    vbdisp_t *iter, *iter2;

    /* _SAFE variant: a terminator may unlink the object being visited. */
    LIST_FOR_EACH_ENTRY_SAFE(iter, iter2, &ctx->objects, vbdisp_t, entry)
        run_terminator(iter);

    while(!list_empty(&ctx->objects)) {
        iter = LIST_ENTRY(list_head(&ctx->objects), vbdisp_t, entry);

        /* Hold a temporary reference so cleanup cannot destroy the object
         * while we are still touching it. */
        IDispatchEx_AddRef(&iter->IDispatchEx_iface);
        clean_props(iter);
        iter->desc = NULL;
        list_remove(&iter->entry);
        list_init(&iter->entry);  /* so a later list_remove is harmless */
        IDispatchEx_Release(&iter->IDispatchEx_iface);
    }
}
/* Destroy the allocator's samples and release the backing buffer.
 * Precondition (asserted): no samples are still outstanding -- all must
 * have been returned to the free list before the allocator dies. */
static HRESULT StdMemAllocator_Free(IMemAllocator * iface)
{
    StdMemAllocator *This = (StdMemAllocator *)iface;
    struct list * cursor;

    assert(list_empty(&This->base.used_list));

    /* Destroy every sample parked on the free list. */
    while ((cursor = list_head(&This->base.free_list)) != NULL)
    {
        list_remove(cursor);
        StdMediaSample2_Delete(LIST_ENTRY(cursor, StdMediaSample2, listentry));
    }

    /* free memory */
    if (!VirtualFree(This->pMemory, 0, MEM_RELEASE))
    {
        ERR("Couldn't free memory. Error: %ld\n", GetLastError());
        return HRESULT_FROM_WIN32(GetLastError());
    }

    return S_OK;
}
/* VMEXIT handler for the CPUID instruction: read the guest's input
 * registers, execute the real CPUID, then let every registered filter
 * matching the requested leaf rewrite the results.
 * NOTE(review): the visible text ends after the filter loop -- the code
 * that writes the results back to the guest registers and the return
 * statement are truncated here. */
static VMEXIT_HANDLING_STATUS vmexit_cpuid_instruction(GUEST_CPU_HANDLE gcpu)
{
    CPUID_PARAMS cpuid_params;
    UINT32 req_id;
    LIST_ELEMENT *filter_desc_list= guest_get_cpuid_list(gcpu_guest_handle(gcpu));
    LIST_ELEMENT *list_iterator;
    CPUID_FILTER_DESCRIPTOR *p_filter_desc;

    /* Load the guest's CPUID input registers. */
    cpuid_params.m_rax = gcpu_get_native_gp_reg(gcpu, IA32_REG_RAX);
    cpuid_params.m_rbx = gcpu_get_native_gp_reg(gcpu, IA32_REG_RBX);
    cpuid_params.m_rcx = gcpu_get_native_gp_reg(gcpu, IA32_REG_RCX);
    cpuid_params.m_rdx = gcpu_get_native_gp_reg(gcpu, IA32_REG_RDX);
    req_id = (UINT32)cpuid_params.m_rax;

    // get the real h/w values
    hw_cpuid(&cpuid_params);

    // pass to filters for virtualization
    LIST_FOR_EACH(filter_desc_list, list_iterator) {
        p_filter_desc = LIST_ENTRY(list_iterator, CPUID_FILTER_DESCRIPTOR, list);
        if (p_filter_desc->cpuid == req_id) {
            p_filter_desc->handler(gcpu, &cpuid_params);
        }
    }
t_json_entity *json_get_entity_with_key(const t_json_entity *object, const char *key) { t_list *pos; t_json_entity *entry; t_json_member *member; entry = NULL; if (object && object->type == e_json_type_object) { pos = object->value.object->list.next; while (pos != &(object->value.object->list)) { member = LIST_ENTRY(pos, t_json_object, list)->member->value.member; entry = member->value; if (entry && ft_strequ(key, member->key->value.string)) { break ; } pos = pos->next; } } return (entry); }
/* Insert and evaluate a parser-delivered <script> element, then drain any
 * scripts that were queued on the window while it ran (presumably by
 * nested parser activity -- confirm against the queueing sites).
 * Returns NS_OK (also when the document has no window), or the QI error. */
static nsresult run_insert_script(HTMLDocumentNode *doc, nsISupports *script_iface, nsISupports *parser_iface)
{
    nsIDOMHTMLScriptElement *nsscript;
    HTMLScriptElement *script_elem;
    nsIParser *nsparser = NULL;
    script_queue_entry_t *iter;
    HTMLInnerWindow *window;
    nsresult nsres;
    HRESULT hres;

    TRACE("(%p)->(%p)\n", doc, script_iface);

    window = doc->window;
    if(!window)
        return NS_OK;

    nsres = nsISupports_QueryInterface(script_iface, &IID_nsIDOMHTMLScriptElement, (void**)&nsscript);
    if(NS_FAILED(nsres)) {
        ERR("Could not get nsIDOMHTMLScriptElement: %08x\n", nsres);
        return nsres;
    }

    /* The parser interface is optional; on QI failure we simply skip the
     * parser-inserted-script bookkeeping below. */
    if(parser_iface) {
        nsres = nsISupports_QueryInterface(parser_iface, &IID_nsIParser, (void**)&nsparser);
        if(NS_FAILED(nsres)) {
            ERR("Could not get nsIParser iface: %08x\n", nsres);
            nsparser = NULL;
        }
    }

    hres = script_elem_from_nsscript(doc, nsscript, &script_elem);
    nsIDOMHTMLScriptElement_Release(nsscript);
    if(FAILED(hres))
        return NS_ERROR_FAILURE;

    if(nsparser) {
        nsIParser_BeginEvaluatingParserInsertedScript(nsparser);
        window->parser_callback_cnt++;
    }

    /* Keep the window alive across script execution. */
    IHTMLWindow2_AddRef(&window->base.IHTMLWindow2_iface);

    doc_insert_script(window, script_elem);

    /* Run whatever got queued while the script above executed; each queue
     * entry owns a reference on its script element. */
    while(!list_empty(&window->script_queue)) {
        iter = LIST_ENTRY(list_head(&window->script_queue), script_queue_entry_t, entry);
        list_remove(&iter->entry);
        if(!iter->script->parsed)
            doc_insert_script(window, iter->script);
        IHTMLScriptElement_Release(&iter->script->IHTMLScriptElement_iface);
        heap_free(iter);
    }

    IHTMLWindow2_Release(&window->base.IHTMLWindow2_iface);

    if(nsparser) {
        window->parser_callback_cnt--;
        nsIParser_EndEvaluatingParserInsertedScript(nsparser);
        nsIParser_Release(nsparser);
    }

    IHTMLScriptElement_Release(&script_elem->IHTMLScriptElement_iface);
    return NS_OK;
}
/* -------------------------------------------------------------------------
 * OSX/Darwin socket layer for the socknal LND: scatter/gather send and
 * receive via sosend()/soreceive(), plus socket-option and upcall
 * (event-callback) plumbing.  Every BSD network-stack call is bracketed by
 * CFS_NET_IN / CFS_NET_EX using state declared by CFS_DECL_NET_DATA.
 * ------------------------------------------------------------------------- */

/* Send the plain-iovec part of a transmit descriptor on conn's socket.
 * Returns bytes sent (possibly short) or a negative errno. */
int ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
#if SOCKNAL_SINGLE_FRAG_TX
        struct iovec scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int niov = tx->tx_niov;
#endif
        struct socket *sock = conn->ksnc_sock;
        int nob;
        int rc;
        int i;
        struct uio suio = {
                .uio_iov = scratchiov,
                .uio_iovcnt = niov,
                .uio_offset = 0,
                .uio_resid = 0,    /* This will be valued after a while */
                .uio_segflg = UIO_SYSSPACE,
                .uio_rw = UIO_WRITE,
                .uio_procp = NULL
        };
        int flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        /* Copy the fragment descriptors and total the byte count. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = tx->tx_iov[i];
                nob += scratchiov[i].iov_len;
        }
        suio.uio_resid = nob;

        CFS_NET_IN;
        rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, flags);
        CFS_NET_EX;

        /* NB there is no return value can indicate how many
         * have been sent and how many resid, we have to get
         * sent bytes from suio. */
        if (rc != 0) {
                if (suio.uio_resid != nob &&\
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                        /* We have sent something */
                        rc = nob - suio.uio_resid;
                else if ( rc == EWOULDBLOCK )
                        /* Actually, EAGAIN and EWOULDBLOCK have same value in OSX */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else  /* rc == 0 */
                rc = nob - suio.uio_resid;

        return rc;
}

/* Send the page (kiov) part of a transmit descriptor; each page is
 * kmapped for the duration of the sosend() and unmapped afterwards.
 * Returns bytes sent or a negative errno. */
int ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int niov = tx->tx_nkiov;
#endif
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t *kiov = tx->tx_kiov;
        int nob;
        int rc;
        int i;
        struct uio suio = {
                .uio_iov = scratchiov,
                .uio_iovcnt = niov,
                .uio_offset = 0,
                .uio_resid = 0,    /* It should be valued after a while */
                .uio_segflg = UIO_SYSSPACE,
                .uio_rw = UIO_WRITE,
                .uio_procp = NULL
        };
        int flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        /* Map each page fragment into an iovec entry. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                nob += scratchiov[i].iov_len = kiov[i].kiov_len;
        }
        suio.uio_resid = nob;

        CFS_NET_IN;
        rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, flags);
        CFS_NET_EX;

        /* Unmap the pages again whatever the send result was. */
        for (i = 0; i < niov; i++)
                cfs_kunmap(kiov[i].kiov_page);

        if (rc != 0) {
                if (suio.uio_resid != nob &&\
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                        /* We have sent something */
                        rc = nob - suio.uio_resid;
                else if ( rc == EWOULDBLOCK )
                        /* EAGAIN and EWOULD BLOCK have same value in OSX */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else  /* rc == 0 */
                rc = nob - suio.uio_resid;

        return rc;
}

/*
 * liang: Hack of inpcb and tcpcb.
 * To get tcpcb of a socket, and call tcp_output
 * to send quick ack.
 */
/* Minimal mirrors of the kernel's private TCP control-block layouts; only
 * the leading fields actually touched below are declared. */
struct ks_tseg_qent{
        int foo;
};

struct ks_tcptemp{
        int foo;
};

LIST_HEAD(ks_tsegqe_head, ks_tseg_qent);

struct ks_tcpcb {
        struct ks_tsegqe_head t_segq;
        int t_dupacks;
        struct ks_tcptemp *unused;
        int t_timer[4];
        struct inpcb *t_inpcb;
        int t_state;
        u_int t_flags;
        /*
         * There are more fields but we dont need
         * ......
         */
};

#define TF_ACKNOW 0x00001
#define TF_DELACK 0x00002

struct ks_inpcb {
        LIST_ENTRY(ks_inpcb) inp_hash;
        struct in_addr reserved1;
        struct in_addr reserved2;
        u_short inp_fport;
        u_short inp_lport;
        LIST_ENTRY(inpcb) inp_list;
        caddr_t inp_ppcb;
        /*
         * There are more fields but we dont need
         * ......
         */
};

#define ks_sotoinpcb(so) ((struct ks_inpcb *)(so)->so_pcb)
#define ks_intotcpcb(ip) ((struct ks_tcpcb *)(ip)->inp_ppcb)
#define ks_sototcpcb(so) (intotcpcb(sotoinpcb(so)))

/* Force an immediate ACK on conn's socket by clearing the delayed-ACK
 * flag in the (hacked) tcpcb and invoking tcp_output() directly. */
void ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
        struct socket *sock = conn->ksnc_sock;
        struct ks_inpcb *inp = ks_sotoinpcb(sock);
        struct ks_tcpcb *tp = ks_intotcpcb(inp);
        int s;
        CFS_DECL_NET_DATA;
        extern int tcp_output(register struct ks_tcpcb *tp);

        CFS_NET_IN;
        s = splnet();

        /*
         * No TCP_QUICKACK supported in BSD, so I have to call tcp_fasttimo
         * to send immediate ACK.
         */
        if (tp && tp->t_flags & TF_DELACK){
                tp->t_flags &= ~TF_DELACK;
                tp->t_flags |= TF_ACKNOW;
                (void) tcp_output(tp);
        }
        splx(s);

        CFS_NET_EX;
        return;
}

/* Receive into conn->ksnc_rx_iov.  Returns bytes received (possibly
 * short) or a negative errno. */
int ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct iovec scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int niov = conn->ksnc_rx_niov;
#endif
        struct iovec *iov = conn->ksnc_rx_iov;
        int nob;
        int rc;
        int i;
        struct uio ruio = {
                .uio_iov = scratchiov,
                .uio_iovcnt = niov,
                .uio_offset = 0,
                .uio_resid = 0,    /* It should be valued after a while */
                .uio_segflg = UIO_SYSSPACE,
                .uio_rw = UIO_READ,
                .uio_procp = NULL
        };
        int flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        ruio.uio_resid = nob;

        CFS_NET_IN;
        rc = soreceive(conn->ksnc_sock, (struct sockaddr **)0, &ruio, (struct mbuf **)0, (struct mbuf **)0, &flags);
        CFS_NET_EX;

        if (rc){
                if (ruio.uio_resid != nob && \
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK || rc == EAGAIN))
                        /* data particially received */
                        rc = nob - ruio.uio_resid;
                else if (rc == EWOULDBLOCK)
                        /* EAGAIN and EWOULD BLOCK have same value in OSX */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else
                rc = nob - ruio.uio_resid;

        return (rc);
}

/* Receive into the page (kiov) fragments of the rx descriptor; pages are
 * kmapped around the soreceive() call.  Returns bytes received or a
 * negative errno. */
int ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int niov = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
        int nob;
        int rc;
        int i;
        struct uio ruio = {
                .uio_iov = scratchiov,
                .uio_iovcnt = niov,
                .uio_offset = 0,
                .uio_resid = 0,
                .uio_segflg = UIO_SYSSPACE,
                .uio_rw = UIO_READ,
                .uio_procp = NULL
        };
        int flags = MSG_DONTWAIT;
        CFS_DECL_NET_DATA;

        /* Map each destination page into an iovec entry. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                nob += scratchiov[i].iov_len = kiov[i].kiov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        ruio.uio_resid = nob;

        CFS_NET_IN;
        rc = soreceive(conn->ksnc_sock, (struct sockaddr **)0, &ruio, (struct mbuf **)0, NULL, &flags);
        CFS_NET_EX;

        /* Unmap the pages again whatever the receive result was. */
        for (i = 0; i < niov; i++)
                cfs_kunmap(kiov[i].kiov_page);

        if (rc){
                if (ruio.uio_resid != nob && \
                    (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                        /* data particially received */
                        rc = nob - ruio.uio_resid;
                else if (rc == EWOULDBLOCK)
                        /* receive blocked, EWOULDBLOCK == EAGAIN */
                        rc = -EAGAIN;
                else
                        rc = -rc;
        } else
                rc = nob - ruio.uio_resid;

        return (rc);
}

/* Snapshot the send/receive buffer sizes and the Nagle setting of conn's
 * socket.  On any failure all three outputs are zeroed. */
int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
        struct socket *sock = conn->ksnc_sock;
        int rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT (conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }
        rc = libcfs_sock_getbuf(sock, txmem, rxmem);
        if (rc == 0) {
                struct sockopt sopt;
                int len;
                CFS_DECL_NET_DATA;

                len = sizeof(*nagle);
                bzero(&sopt, sizeof sopt);
                sopt.sopt_dir = SOPT_GET;
                sopt.sopt_level = IPPROTO_TCP;
                sopt.sopt_name = TCP_NODELAY;
                sopt.sopt_val = nagle;
                sopt.sopt_valsize = len;

                CFS_NET_IN;
                rc = -sogetopt(sock, &sopt);
                CFS_NET_EX;
        }
        ksocknal_connsock_decref(conn);

        /* TCP_NODELAY set means Nagle is OFF, so invert the flag. */
        if (rc == 0)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;
        return (rc);
}

/* Configure a freshly created socket: buffer sizes, SO_LINGER off,
 * optional TCP_NODELAY, and keepalive according to the module tunables.
 * Returns 0 on success or a negative errno. */
int ksocknal_lib_setup_sock (struct socket *so)
{
        struct sockopt sopt;
        int rc;
        int option;
        int keep_idle;
        int keep_intvl;
        int keep_count;
        int do_keepalive;
        struct linger linger;
        CFS_DECL_NET_DATA;

        rc = libcfs_sock_setbuf(so, *ksocknal_tunables.ksnd_tx_buffer_size, *ksocknal_tunables.ksnd_rx_buffer_size);
        if (rc != 0) {
                CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n", *ksocknal_tunables.ksnd_tx_buffer_size, *ksocknal_tunables.ksnd_rx_buffer_size, rc);
                return (rc);
        }

        /* Ensure this socket aborts active sends immediately when we close
         * it. */
        bzero(&sopt, sizeof sopt);
        linger.l_onoff = 0;
        linger.l_linger = 0;
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = SOL_SOCKET;
        sopt.sopt_name = SO_LINGER;
        sopt.sopt_val = &linger;
        sopt.sopt_valsize = sizeof(linger);

        CFS_NET_IN;
        rc = -sosetopt(so, &sopt);
        if (rc != 0) {
                CERROR ("Can't set SO_LINGER: %d\n", rc);
                goto out;
        }

        if (!*ksocknal_tunables.ksnd_nagle) {
                option = 1;
                bzero(&sopt, sizeof sopt);
                sopt.sopt_dir = SOPT_SET;
                sopt.sopt_level = IPPROTO_TCP;
                sopt.sopt_name = TCP_NODELAY;
                sopt.sopt_val = &option;
                sopt.sopt_valsize = sizeof(option);
                rc = -sosetopt(so, &sopt);
                if (rc != 0) {
                        CERROR ("Can't disable nagle: %d\n", rc);
                        goto out;
                }
        }

        /* snapshot tunables */
        keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
        option = (do_keepalive ? 1 : 0);

        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = SOL_SOCKET;
        sopt.sopt_name = SO_KEEPALIVE;
        sopt.sopt_val = &option;
        sopt.sopt_valsize = sizeof(option);
        rc = -sosetopt(so, &sopt);
        if (rc != 0) {
                CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
                goto out;
        }

        if (!do_keepalive) {
                /* no more setting, just return */
                rc = 0;
                goto out;
        }

        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = IPPROTO_TCP;
        sopt.sopt_name = TCP_KEEPALIVE;
        sopt.sopt_val = &keep_idle;
        sopt.sopt_valsize = sizeof(keep_idle);
        rc = -sosetopt(so, &sopt);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPALIVE : %d\n", rc);
                goto out;
        }
out:
        CFS_NET_EX;
        return (rc);
}

/* Nudge queued data out on conn's socket by setting TCP_NODELAY.
 * No-op when the connection is already being shut down. */
void ksocknal_lib_push_conn(ksock_conn_t *conn)
{
        struct socket *sock;
        struct sockopt sopt;
        int val = 1;
        int rc;
        CFS_DECL_NET_DATA;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0)     /* being shut down */
                return;
        sock = conn->ksnc_sock;
        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = IPPROTO_TCP;
        sopt.sopt_name = TCP_NODELAY;
        sopt.sopt_val = &val;
        sopt.sopt_valsize = sizeof val;
        CFS_NET_IN;
        sosetopt(sock, &sopt);
        CFS_NET_EX;

        ksocknal_connsock_decref(conn);
        return;
}

extern void ksocknal_read_callback (ksock_conn_t *conn);
extern void ksocknal_write_callback (ksock_conn_t *conn);

/* Socket upcall installed on both socket buffers: dispatches readable /
 * writable events to the socknal read/write callbacks under the global
 * read lock. */
static void ksocknal_upcall(struct socket *so, caddr_t arg, int waitf)
{
        ksock_conn_t *conn = (ksock_conn_t *)arg;
        ENTRY;

        read_lock (&ksocknal_data.ksnd_global_lock);
        if (conn == NULL)
                goto out;

        if (so->so_rcv.sb_flags & SB_UPCALL) {
                extern int soreadable(struct socket *so);
                if (conn->ksnc_rx_nob_wanted && soreadable(so))
                        /* To verify whether the upcall is for receive */
                        ksocknal_read_callback (conn);
        }

        /* go foward? */
        if (so->so_snd.sb_flags & SB_UPCALL){
                extern int sowriteable(struct socket *so);
                if (sowriteable(so))
                        /* socket is writable */
                        ksocknal_write_callback(conn);
        }
out:
        read_unlock (&ksocknal_data.ksnd_global_lock);
        EXIT;
}

void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
        /* No callback need to save in osx */
        return;
}

/* Install ksocknal_upcall on both the send and receive buffers of the
 * socket and arm the SB_UPCALL flags. */
void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
        CFS_DECL_NET_DATA;

        CFS_NET_IN;
        sock->so_upcallarg = (void *)conn;
        sock->so_upcall = ksocknal_upcall;
        sock->so_snd.sb_timeo = 0;
        sock->so_rcv.sb_timeo = cfs_time_seconds(2);
        sock->so_rcv.sb_flags |= SB_UPCALL;
        sock->so_snd.sb_flags |= SB_UPCALL;
        CFS_NET_EX;
        return;
}

/* Fire the upcall once by hand (e.g. right after installing it). */
void ksocknal_lib_act_callback(struct socket *sock, ksock_conn_t *conn)
{
        CFS_DECL_NET_DATA;

        CFS_NET_IN;
        ksocknal_upcall (sock, (void *)conn, 0);
        CFS_NET_EX;
}