int
LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
            lnet_handle_eq_t *handle)
{
        lnet_eq_t     *eq;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
         * overflow, they don't skip entries, so the queue has the same
         * apparent capacity at all times */
        if (count != LOWEST_BIT_SET(count)) {   /* not a power of 2 already */
                do {                   /* knock off all but the top bit... */
                        count &= ~LOWEST_BIT_SET (count);
                } while (count != LOWEST_BIT_SET(count));

                count <<= 1;                            /* ...and round up */
        }

        if (count == 0)       /* catch bad parameter / overflow on roundup */
                return (-EINVAL);

        eq = lnet_eq_alloc();
        if (eq == NULL)
                return (-ENOMEM);

        LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
        if (eq->eq_events == NULL) {
                LNET_LOCK();
                lnet_eq_free (eq);
                LNET_UNLOCK();

                return -ENOMEM;
        }

        /* NB this resets all event sequence numbers to 0, to be earlier
         * than eq_deq_seq */
        memset(eq->eq_events, 0, count * sizeof(lnet_event_t));

        eq->eq_deq_seq = 1;
        eq->eq_enq_seq = 1;
        eq->eq_size = count;
        eq->eq_refcount = 0;
        eq->eq_callback = callback;

        LNET_LOCK();

        lnet_initialise_handle (&eq->eq_lh, LNET_COOKIE_TYPE_EQ);
        list_add (&eq->eq_list, &the_lnet.ln_active_eqs);

        LNET_UNLOCK();

        lnet_eq2handle(handle, eq);
        return (0);
}
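/*
 * Illustrative sketch, not part of the original source: a standalone
 * equivalent of the power-of-2 round-up performed by LNetEQAlloc() above,
 * assuming LOWEST_BIT_SET(i) isolates the least significant set bit
 * (i & -i) as in libcfs.  Worked examples: 5 -> 8, 6 -> 8, 8 -> 8, and
 * 0 -> 0 (which the -EINVAL check then rejects).  A power-of-2 eq_size is
 * what lets the enqueue/dequeue sequence numbers wrap without the ring
 * appearing to change capacity.
 */
#if 0 /* example only */
static unsigned int
lnet_eq_roundup_example(unsigned int count)
{
        if (count == LOWEST_BIT_SET(count))      /* 0 or a power of 2 */
                return count;

        do {                            /* knock off all but the top bit */
                count &= ~LOWEST_BIT_SET(count);
        } while (count != LOWEST_BIT_SET(count));

        return count << 1;                       /* round up */
}
#endif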
void
lnet_complete_msg_locked(lnet_msg_t *msg)
{
        lnet_handle_wire_t ack_wmd;
        int                rc;
        int                status = msg->msg_ev.status;

        LASSERT (msg->msg_onactivelist);

        if (status == 0 && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */
                lnet_return_credits_locked(msg);

                msg->msg_ack = 0;
                LNET_UNLOCK();

                LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
                LASSERT(!msg->msg_routing);

                ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

                lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);

                msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
                msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
                msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

                rc = lnet_send(msg->msg_ev.target.nid, msg);

                LNET_LOCK();

                if (rc == 0)
                        return;
        } else if (status == 0 &&                       /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded */
                LASSERT (!msg->msg_receiving);  /* called back recv already */

                LNET_UNLOCK();

                rc = lnet_send(LNET_NID_ANY, msg);

                LNET_LOCK();

                if (rc == 0)
                        return;
        }

        lnet_return_credits_locked(msg);

        LASSERT (msg->msg_onactivelist);
        msg->msg_onactivelist = 0;
        list_del (&msg->msg_activelist);
        the_lnet.ln_counters.msgs_alloc--;
        lnet_msg_free(msg);
}
int
LNetEQFree(lnet_handle_eq_t eqh)
{
        lnet_eq_t     *eq;
        int            size;
        lnet_event_t  *events;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        LNET_LOCK();

        eq = lnet_handle2eq(&eqh);
        if (eq == NULL) {
                LNET_UNLOCK();
                return (-ENOENT);
        }

        if (eq->eq_refcount != 0) {
                CDEBUG(D_NET, "Event queue (%d) busy on destroy.\n",
                       eq->eq_refcount);
                LNET_UNLOCK();
                return (-EBUSY);
        }

        /* stash for free after lock dropped */
        events = eq->eq_events;
        size = eq->eq_size;

        lnet_invalidate_handle (&eq->eq_lh);
        list_del (&eq->eq_list);
        lnet_eq_free (eq);

        LNET_UNLOCK();

        LIBCFS_FREE(events, size * sizeof (lnet_event_t));

        return 0;
}
int
LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
            lnet_event_t *event, int *which)
{
        int              i;
        int              rc;
#ifdef __KERNEL__
        cfs_waitlink_t   wl;
        cfs_time_t       now;
#else
        struct timeval   then;
        struct timeval   now;
# ifdef HAVE_LIBPTHREAD
        struct timespec  ts;
# endif
        lnet_ni_t       *eqwaitni = the_lnet.ln_eqwaitni;
#endif
        ENTRY;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (neq < 1)
                RETURN(-ENOENT);

        LNET_LOCK();

        for (;;) {
#ifndef __KERNEL__
                LNET_UNLOCK();

                /* Recursion breaker */
                if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
                    !LNetHandleIsEqual(eventqs[0], the_lnet.ln_rc_eqh))
                        lnet_router_checker();

                LNET_LOCK();
#endif
                for (i = 0; i < neq; i++) {
                        lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);

                        if (eq == NULL) {
                                LNET_UNLOCK();
                                RETURN(-ENOENT);
                        }

                        rc = lib_get_event (eq, event);
                        if (rc != 0) {
                                LNET_UNLOCK();
                                *which = i;
                                RETURN(rc);
                        }
                }

#ifdef __KERNEL__
                if (timeout_ms == 0) {
                        LNET_UNLOCK();
                        RETURN (0);
                }

                cfs_waitlink_init(&wl);
                set_current_state(TASK_INTERRUPTIBLE);
                cfs_waitq_add(&the_lnet.ln_waitq, &wl);

                LNET_UNLOCK();

                if (timeout_ms < 0) {
                        cfs_waitq_wait (&wl, CFS_TASK_INTERRUPTIBLE);
                } else {
                        struct timeval tv;

                        now = cfs_time_current();
                        cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
                                            cfs_time_seconds(timeout_ms)/1000);
                        cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
                                          &tv);
                        timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
                        if (timeout_ms < 0)
                                timeout_ms = 0;
                }

                LNET_LOCK();
                cfs_waitq_del(&the_lnet.ln_waitq, &wl);
#else
                if (eqwaitni != NULL) {
                        /* I have a single NI that I have to call into, to get
                         * events queued, or to block. */
                        lnet_ni_addref_locked(eqwaitni);
                        LNET_UNLOCK();

                        if (timeout_ms <= 0) {
                                (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
                        } else {
                                gettimeofday(&then, NULL);

                                (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);

                                gettimeofday(&now, NULL);
                                timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
                                              (now.tv_usec - then.tv_usec) / 1000;
                                if (timeout_ms < 0)
                                        timeout_ms = 0;
                        }

                        LNET_LOCK();
                        lnet_ni_decref_locked(eqwaitni);

                        /* don't call into eqwaitni again if timeout has
                         * expired */
                        if (timeout_ms == 0)
                                eqwaitni = NULL;

                        continue;        /* go back and check for events */
                }

                if (timeout_ms == 0) {
                        LNET_UNLOCK();
                        RETURN (0);
                }

# ifndef HAVE_LIBPTHREAD
                /* If I'm single-threaded, LNET fails at startup if it can't
                 * set the_lnet.ln_eqwaitni correctly. */
                LBUG();
# else
                if (timeout_ms < 0) {
                        pthread_cond_wait(&the_lnet.ln_cond,
                                          &the_lnet.ln_lock);
                } else {
                        gettimeofday(&then, NULL);

                        ts.tv_sec = then.tv_sec + timeout_ms/1000;
                        ts.tv_nsec = then.tv_usec * 1000 +
                                     (timeout_ms%1000) * 1000000;
                        if (ts.tv_nsec >= 1000000000) {
                                ts.tv_sec++;
                                ts.tv_nsec -= 1000000000;
                        }

                        pthread_cond_timedwait(&the_lnet.ln_cond,
                                               &the_lnet.ln_lock, &ts);

                        gettimeofday(&now, NULL);
                        timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
                                      (now.tv_usec - then.tv_usec) / 1000;

                        if (timeout_ms < 0)
                                timeout_ms = 0;
                }
# endif
#endif
        }
}
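/*
 * Illustrative caller sketch, not part of the original source: the EQ
 * lifecycle expected by LNetEQAlloc/LNetEQPoll/LNetEQFree above.  The
 * NULL callback (polling-only EQ), the queue size, and the abbreviated
 * error handling are assumptions for the example.
 */
#if 0 /* example only */
static void
lnet_eq_usage_example(void)
{
        lnet_handle_eq_t eqh;
        lnet_event_t     ev;
        int              which;
        int              rc;

        /* 64 is already a power of 2, so LNetEQAlloc keeps it as-is */
        rc = LNetEQAlloc(64, NULL, &eqh);
        if (rc != 0)
                return;

        /* block for up to 1000ms on the single EQ */
        rc = LNetEQPoll(&eqh, 1, 1000, &ev, &which);
        if (rc == 1) {
                /* got an event: ev is valid and which == 0 */
        } else if (rc == -EOVERFLOW) {
                /* event returned, but earlier ones were dropped:
                 * the EQ is too small for the event rate */
        } /* rc == 0: timed out with no event pending */

        /* fails with -EBUSY while any MD still references the EQ */
        (void)LNetEQFree(eqh);
}
#endif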
void
lnet_finalize (__unusedx lnet_ni_t *ni, lnet_msg_t *msg, int status)
{
#ifdef __KERNEL__
        int                i;
        int                my_slot;
#endif
        lnet_libmd_t      *md;

        LASSERT (!in_interrupt ());

        if (msg == NULL)
                return;
#if 0
        CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
               lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
               msg->msg_target_is_router ? "t" : "",
               msg->msg_routing ? "X" : "",
               msg->msg_ack ? "A" : "",
               msg->msg_sending ? "S" : "",
               msg->msg_receiving ? "R" : "",
               msg->msg_delayed ? "d" : "",
               msg->msg_txcredit ? "C" : "",
               msg->msg_peertxcredit ? "c" : "",
               msg->msg_rtrcredit ? "F" : "",
               msg->msg_peerrtrcredit ? "f" : "",
               msg->msg_onactivelist ? "!" : "",
               msg->msg_txpeer == NULL ? "<none>" :
                       libcfs_nid2str(msg->msg_txpeer->lp_nid),
               msg->msg_rxpeer == NULL ? "<none>" :
                       libcfs_nid2str(msg->msg_rxpeer->lp_nid));
#endif
        LNET_LOCK();

        LASSERT (msg->msg_onactivelist);

        msg->msg_ev.status = status;

        md = msg->msg_md;
        if (md != NULL) {
                int      unlink;

                /* Now it's safe to drop my caller's ref */
                md->md_refcount--;
                LASSERT (md->md_refcount >= 0);

                unlink = lnet_md_unlinkable(md);

                msg->msg_ev.unlinked = unlink;

                if (md->md_eq != NULL)
                        lnet_enq_event_locked(md->md_eq, &msg->msg_ev);

                if (unlink)
                        lnet_md_unlink(md);

                msg->msg_md = NULL;
        }

        list_add_tail (&msg->msg_list, &the_lnet.ln_finalizeq);

        /* Recursion breaker.  Don't complete the message here if I am (or
         * enough other threads are) already completing messages */

#ifdef __KERNEL__
        my_slot = -1;
        for (i = 0; i < the_lnet.ln_nfinalizers; i++) {
                if (the_lnet.ln_finalizers[i] == cfs_current())
                        goto out;
                if (my_slot < 0 && the_lnet.ln_finalizers[i] == NULL)
                        my_slot = i;
        }
        if (my_slot < 0)
                goto out;

        the_lnet.ln_finalizers[my_slot] = cfs_current();
#else
        if (the_lnet.ln_finalizing)
                goto out;

        the_lnet.ln_finalizing = 1;
#endif

        while (!list_empty(&the_lnet.ln_finalizeq)) {
                msg = list_entry(the_lnet.ln_finalizeq.next,
                                 lnet_msg_t, msg_list);

                list_del(&msg->msg_list);

                /* NB drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too */
                lnet_complete_msg_locked(msg);
        }

#ifdef __KERNEL__
        the_lnet.ln_finalizers[my_slot] = NULL;
#else
        the_lnet.ln_finalizing = 0;
#endif

 out:
        LNET_UNLOCK();
}
int
lnet_connect(cfs_socket_t **sockp, lnet_nid_t peer_nid,
             __u32 local_ip, __u32 peer_ip, int peer_port)
{
        lnet_acceptor_connreq_t cr;
        cfs_socket_t           *sock;
        int                     rc;
        int                     port;
        int                     fatal;

        CLASSERT (sizeof(cr) <= 16);    /* not too big to be on the stack */

        for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
             port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
             --port) {
                /* Iterate through reserved ports. */

                rc = libcfs_sock_connect(&sock, &fatal,
                                         local_ip, port,
                                         peer_ip, peer_port);
                if (rc != 0) {
                        if (fatal)
                                goto failed;
                        continue;
                }

                CLASSERT (LNET_PROTO_ACCEPTOR_VERSION == 1);

                cr.acr_magic   = LNET_PROTO_ACCEPTOR_MAGIC;
                cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
                cr.acr_nid     = peer_nid;

                if (the_lnet.ln_testprotocompat != 0) {
                        /* single-shot proto check */
                        LNET_LOCK();
                        if ((the_lnet.ln_testprotocompat & 4) != 0) {
                                cr.acr_version++;
                                the_lnet.ln_testprotocompat &= ~4;
                        }
                        if ((the_lnet.ln_testprotocompat & 8) != 0) {
                                cr.acr_magic = LNET_PROTO_MAGIC;
                                the_lnet.ln_testprotocompat &= ~8;
                        }
                        LNET_UNLOCK();
                }

                rc = libcfs_sock_write(sock, &cr, sizeof(cr),
                                       accept_timeout);
                if (rc != 0)
                        goto failed_sock;

                *sockp = sock;
                return 0;
        }

        rc = -EADDRINUSE;
        goto failed;

 failed_sock:
        libcfs_sock_release(sock);
 failed:
        lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port);
        return rc;
}
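/*
 * Illustrative caller sketch, not part of the original source: how an LND
 * might use lnet_connect() above to reach a peer's acceptor.  The 988
 * default acceptor port literal and the zero (any) local IP are
 * assumptions for the example; real callers take both from their
 * configuration.
 */
#if 0 /* example only */
static int
lnet_connect_example(lnet_nid_t peer_nid, __u32 peer_ip)
{
        cfs_socket_t *sock;
        int           rc;

        rc = lnet_connect(&sock, peer_nid, 0 /* any local IP */,
                          peer_ip, 988 /* default acceptor port */);
        if (rc != 0)            /* console error already logged */
                return rc;

        /* handshake over 'sock'; release it when done */
        libcfs_sock_release(sock);
        return 0;
}
#endif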