void *nn_chunk_alloc (size_t size, int type) { size_t sz; struct nn_chunk *self; /* Allocate the actual memory depending on the type. */ sz = sizeof (struct nn_chunk) + 2 * sizeof (uint32_t) + size; switch (type) { case 0: self = nn_alloc (sz, "message chunk"); break; default: return NULL; } alloc_assert (self); /* Fill in the chunk header. */ nn_atomic_init (&self->refcount, 1); self->size = size; self->ffn = nn_chunk_default_free; /* Fill in the size of the empty space between the chunk header and the message. */ nn_putl ((uint8_t*) ((uint32_t*) (self + 1)), 0); /* Fill in the tag. */ nn_putl ((uint8_t*) ((((uint32_t*) (self + 1))) + 1), NN_CHUNK_TAG); return ((uint8_t*) (self + 1)) + 2 * sizeof (uint32_t); }
static int nn_respondent_send (struct nn_sockbase *self, struct nn_msg *msg) { int rc; struct nn_respondent *respondent; respondent = nn_cont (self, struct nn_respondent, xrespondent.sockbase); /* If there's no survey going on, report EFSM error. */ if (nn_slow (!(respondent->flags & NN_RESPONDENT_INPROGRESS))) return -EFSM; /* Tag the message with survey ID. */ nn_assert (nn_chunkref_size (&msg->sphdr) == 0); nn_chunkref_term (&msg->sphdr); nn_chunkref_init (&msg->sphdr, 4); nn_putl (nn_chunkref_data (&msg->sphdr), respondent->surveyid); /* Try to send the message. If it cannot be sent due to pushback, drop it silently. */ rc = nn_xrespondent_send (&respondent->xrespondent.sockbase, msg); if (nn_slow (rc == -EAGAIN)) { nn_msg_term (msg); return -EAGAIN; } errnum_assert (rc == 0, -rc); /* Remember that no survey is being processed. */ respondent->flags &= ~NN_RESPONDENT_INPROGRESS; return 0; }
static int nn_surveyor_send (struct nn_sockbase *self, struct nn_msg *msg) { int rc; struct nn_surveyor *surveyor; surveyor = nn_cont (self, struct nn_surveyor, xsurveyor.sockbase); /* Cancel any ongoing survey. */ if (nn_slow (surveyor->flags & NN_SURVEYOR_INPROGRESS)) { surveyor->flags &= ~NN_SURVEYOR_INPROGRESS; nn_timer_stop (&surveyor->deadline_timer); } /* Generate new survey ID. */ ++surveyor->surveyid; /* Tag the survey body with survey ID. */ nn_assert (nn_chunkref_size (&msg->hdr) == 0); nn_chunkref_term (&msg->hdr); nn_chunkref_init (&msg->hdr, 4); nn_putl (nn_chunkref_data (&msg->hdr), surveyor->surveyid); /* Send the survey. */ rc = nn_xsurveyor_send (&surveyor->xsurveyor.sockbase, msg); errnum_assert (rc == 0, -rc); surveyor->flags |= NN_SURVEYOR_INPROGRESS; /* Set up the re-send timer. */ nn_timer_start (&surveyor->deadline_timer, surveyor->deadline); return 0; }
/*  Trims n bytes from the beginning of the chunk's data and returns the
    pointer to the new start of the data.

    Fix: 'n >= 0' in the sanity check was tautological ('n' is size_t,
    an unsigned type) and would trigger -Wtype-limits; it was removed.  */
void *nn_chunk_trim (void *p, size_t n)
{
    struct nn_chunk *self;

    self = nn_chunk_getptr (p);

    /*  Sanity check.  We cannot trim more bytes than there are in
        the chunk.  */
    nn_assert (n <= self->size);

    /*  Adjust the chunk header: move the tag and the empty-space counter
        to sit immediately before the new data start.  */
    p = ((uint8_t*) p) + n;
    nn_putl ((uint8_t*) (((uint32_t*) p) - 1), NN_CHUNK_TAG);
    nn_putl ((uint8_t*) (((uint32_t*) p) - 2),
        (uint8_t*) p - (uint8_t*) self - 2 * sizeof (uint32_t) -
        sizeof (struct nn_chunk));

    /*  Adjust the size of the message.  */
    self->size -= n;

    return p;
}
int nn_chunk_alloc (size_t size, int type, void **result) { size_t sz; struct nn_chunk *self; const size_t hdrsz = sizeof (struct nn_chunk) + 2 * sizeof (uint32_t); /* Compute total size to be allocated. Check for overflow. */ sz = hdrsz + size; if (nn_slow (sz < hdrsz)) return -ENOMEM; /* Allocate the actual memory depending on the type. */ switch (type) { case 0: self = nn_alloc (sz, "message chunk"); break; default: return -EINVAL; } if (nn_slow (!self)) return -ENOMEM; /* Fill in the chunk header. */ nn_atomic_init (&self->refcount, 1); self->size = size; self->ffn = nn_chunk_default_free; /* Fill in the size of the empty space between the chunk header and the message. */ nn_putl ((uint8_t*) ((uint32_t*) (self + 1)), 0); /* Fill in the tag. */ nn_putl ((uint8_t*) ((((uint32_t*) (self + 1))) + 1), NN_CHUNK_TAG); *result = ((uint8_t*) (self + 1)) + 2 * sizeof (uint32_t); return 0; }
static int nn_req_send (struct nn_sockbase *self, struct nn_msg *msg) { int rc; struct nn_req *req; req = nn_cont (self, struct nn_req, xreq.sockbase); /* If there's a request in progress, cancel it. */ if (nn_slow (req->state != NN_REQ_STATE_IDLE)) { if (req->state == NN_REQ_STATE_UNSENT || req->state == NN_REQ_STATE_SENT) nn_msg_term (&req->request); if (req->state == NN_REQ_STATE_RECEIVED) nn_msg_term (&req->reply); nn_timer_term (&req->resend_timer); req->state = NN_REQ_STATE_IDLE; } /* Generate new request ID for the new request and put it into message header. The most important bit is set to 1 to indicate that this is the bottom of the backtrace stack. */ ++req->reqid; nn_assert (nn_chunkref_size (&msg->hdr) == 0); nn_chunkref_term (&msg->hdr); nn_chunkref_init (&msg->hdr, 4); nn_putl (nn_chunkref_data (&msg->hdr), req->reqid | 0x80000000); /* Store the message so that it can be re-sent if there's no reply. Then make a copy of it and send it. */ nn_msg_cp (&req->request, msg); rc = nn_xreq_send (&req->xreq.sockbase, msg); errnum_assert (rc == 0 || rc == -EAGAIN, -rc); /* If the request cannot be sent at the moment switch to UNSENT state. It will be sent as soon as a new outbound pipe arrives. */ if (nn_slow (rc == -EAGAIN)) { nn_msg_term (msg); req->state = NN_REQ_STATE_UNSENT; return 0; } /* If the request was successgfully sent set up the re-send timer in case it get lost somewhere further out in the topology. */ nn_timer_start (&req->resend_timer, req->resend_ivl); req->state = NN_REQ_STATE_SENT; return 0; }
/*  Drop one reference to the chunk that owns the data pointed to by p.
    The chunk is deallocated only when the last reference is released.  */
void nn_chunk_free (void *p)
{
    struct nn_chunk *chunk;

    chunk = nn_chunk_getptr (p);

    /*  nn_atomic_dec returns the pre-decrement value; a value of 1 (or
        less) means this was the last reference.  */
    if (nn_atomic_dec (&chunk->refcount, 1) <= 1) {

        /*  Overwrite the tag so that stale pointers into the freed
            chunk can be detected.  */
        nn_putl ((uint8_t*) (((uint32_t*) p) - 1), NN_CHUNK_TAG_DEALLOCATED);

        /*  Release the resources held by the chunk itself.  */
        nn_atomic_term (&chunk->refcount);

        /*  Return the memory block via whatever deallocation routine
            the chunk was created with.  */
        chunk->ffn (chunk);
    }
}
static int nn_surveyor_send (struct nn_sockbase *self, struct nn_msg *msg) { struct nn_surveyor *surveyor; surveyor = nn_cont (self, struct nn_surveyor, xsurveyor.sockbase); /* Generate new survey ID. */ ++surveyor->surveyid; surveyor->surveyid |= 0x80000000; /* Tag the survey body with survey ID. */ nn_assert (nn_chunkref_size (&msg->sphdr) == 0); nn_chunkref_term (&msg->sphdr); nn_chunkref_init (&msg->sphdr, 4); nn_putl (nn_chunkref_data (&msg->sphdr), surveyor->surveyid); /* Store the survey, so that it can be sent later on. */ nn_msg_term (&surveyor->tosend); nn_msg_mv (&surveyor->tosend, msg); nn_msg_init (msg, 0); /* Cancel any ongoing survey, if any. */ if (nn_slow (nn_surveyor_inprogress (surveyor))) { /* First check whether the survey can be sent at all. */ if (!(nn_xsurveyor_events (&surveyor->xsurveyor.sockbase) & NN_SOCKBASE_EVENT_OUT)) return -EAGAIN; /* Cancel the current survey. */ nn_fsm_action (&surveyor->fsm, NN_SURVEYOR_ACTION_CANCEL); return 0; } /* Notify the state machine that the survey was started. */ nn_fsm_action (&surveyor->fsm, NN_SURVEYOR_ACTION_START); return 0; }
static int nn_req_send (struct nn_sockbase *self, struct nn_msg *msg) { struct nn_req *req; req = nn_cont (self, struct nn_req, xreq.sockbase); /* Generate new request ID for the new request and put it into message header. The most important bit is set to 1 to indicate that this is the bottom of the backtrace stack. */ ++req->reqid; nn_assert (nn_chunkref_size (&msg->hdr) == 0); nn_chunkref_term (&msg->hdr); nn_chunkref_init (&msg->hdr, 4); nn_putl (nn_chunkref_data (&msg->hdr), req->reqid | 0x80000000); /* Store the message so that it can be re-sent if there's no reply. */ nn_msg_term (&req->request); nn_msg_mv (&req->request, msg); /* Notify the state machine. */ nn_fsm_action (&req->fsm, NN_REQ_ACTION_SENT); return 0; }