static int kvp_handle_handshake(struct hv_kvp_msg *msg) { int ret = 1; switch (msg->kvp_hdr.operation) { case KVP_OP_REGISTER: dm_reg_value = KVP_OP_REGISTER; pr_info("KVP: IP injection functionality not available\n"); pr_info("KVP: Upgrade the KVP daemon\n"); break; case KVP_OP_REGISTER1: dm_reg_value = KVP_OP_REGISTER1; break; default: pr_info("KVP: incompatible daemon\n"); pr_info("KVP: KVP version: %d, Daemon version: %d\n", KVP_OP_REGISTER1, msg->kvp_hdr.operation); ret = 0; } if (ret) { /* * We have a compatible daemon; complete the handshake. */ pr_info("KVP: user-mode registering done.\n"); kvp_register(dm_reg_value); kvp_transaction.active = false; if (kvp_transaction.kvp_context) poll_channel(kvp_transaction.kvp_context); } return ret; }
/** * \brief Return next event on given waitset, if one is already pending * * This is essentially a non-blocking variant of get_next_event(). It should be * used with great care, to avoid the creation of busy-waiting loops. * * \param ws Waitset * \param retclosure Pointer to storage space for returned event closure * * \returns LIB_ERR_NO_EVENT if nothing is pending */ errval_t check_for_event(struct waitset *ws, struct event_closure *retclosure) { struct waitset_chanstate *chan; int pollcount = 0; assert(ws != NULL); assert(retclosure != NULL); recheck: ; // are there any pending events on the waitset? dispatcher_handle_t handle = disp_disable(); chan = get_pending_event_disabled(ws); disp_enable(handle); if (chan != NULL) { *retclosure = chan->closure; return SYS_ERR_OK; } // if there are no pending events, poll all channels once if (ws->polled != NULL && pollcount++ == 0) { for (chan = ws->polled; chan != NULL && chan->waitset == ws && chan->state == CHAN_POLLED; chan = chan->next) { poll_channel(chan); if (ws->pending != NULL) { goto recheck; } if (chan->next == ws->polled) { // reached the start of the queue break; } } } return LIB_ERR_NO_EVENT; }
/*
 * kvp_respond_to_host - send the result of a KVP transaction back to the
 * Hyper-V host over the VMBus channel.
 *
 * @msg_to_host: key/value data supplied by the user-mode daemon (used for
 *               GET/ENUMERATE responses; ignored on error paths).
 * @error:       0 on success, otherwise an error/timeout status that is
 *               placed in the response header and terminates the host's
 *               current enumeration on this pool.
 *
 * Consumes the single active transaction in the global kvp_transaction
 * state and builds the reply in-place in the shared recv_buffer. Only one
 * transaction can be active at a time, so the global snapshot below is safe.
 */
static void kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
{
	struct hv_kvp_msg *kvp_msg;
	struct hv_kvp_exchg_msg_value *kvp_data;
	char *key_name;
	char *value;
	struct icmsg_hdr *icmsghdrp;
	int keylen = 0;
	int valuelen = 0;
	u32 buf_len;
	struct vmbus_channel *channel;
	u64 req_id;
	int ret;

	/*
	 * If a transaction is not active; log and return.
	 */
	if (!kvp_transaction.active) {
		/*
		 * This is a spurious call!
		 */
		pr_warn("KVP: Transaction not active\n");
		return;
	}
	/*
	 * Copy the global state for completing the transaction. Note that
	 * only one transaction can be active at a time. The snapshot must
	 * be taken before clearing .active, which releases the transaction.
	 */
	buf_len = kvp_transaction.recv_len;
	channel = kvp_transaction.recv_channel;
	req_id = kvp_transaction.recv_req_id;
	kvp_transaction.active = false;

	/* The response header lives just past the pipe header in recv_buffer. */
	icmsghdrp = (struct icmsg_hdr *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (channel->onchannel_callback == NULL)
		/*
		 * We have raced with util driver being unloaded;
		 * silently return.
		 */
		return;

	icmsghdrp->status = error;

	/*
	 * If the error parameter is set, terminate the host's enumeration
	 * on this pool.
	 */
	if (error) {
		/*
		 * Something failed or we have timedout;
		 * terminate the current host-side iteration.
		 */
		goto response_done;
	}

	/* Payload follows the pipe and icmsg headers in the same buffer. */
	kvp_msg = (struct hv_kvp_msg *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr) +
				sizeof(struct icmsg_hdr)];

	switch (kvp_transaction.kvp_msg->kvp_hdr.operation) {
	case KVP_OP_GET_IP_INFO:
		ret = process_ob_ipinfo(msg_to_host,
					(struct hv_kvp_ip_msg *)kvp_msg,
					KVP_OP_GET_IP_INFO);
		if (ret < 0)
			icmsghdrp->status = HV_E_FAIL;

		goto response_done;
	case KVP_OP_SET_IP_INFO:
		goto response_done;
	case KVP_OP_GET:
		/* GET carries only a value, not a key; skip key encoding. */
		kvp_data = &kvp_msg->body.kvp_get.data;
		goto copy_value;

	case KVP_OP_SET:
	case KVP_OP_DELETE:
		goto response_done;
	default:
		/* ENUMERATE (and anything else) falls through to copy both. */
		break;
	}

	kvp_data = &kvp_msg->body.kvp_enum_data.data;
	key_name = msg_to_host->body.kvp_enum_data.data.key;

	/*
	 * The windows host expects the key/value pair to be encoded
	 * in utf16. Ensure that the key/value size reported to the host
	 * will be less than or equal to the MAX size (including the
	 * terminating character).
	 */
	keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
				(wchar_t *) kvp_data->key,
				(HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2) - 2);
	kvp_data->key_size = 2*(keylen + 1); /* utf16 encoding */

copy_value:
	value = msg_to_host->body.kvp_enum_data.data.value;
	valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
				(wchar_t *) kvp_data->value,
				(HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2);
	kvp_data->value_size = 2*(valuelen + 1); /* utf16 encoding */

	/*
	 * If the utf8s to utf16s conversion failed; notify host
	 * of the error.
	 */
	if ((keylen < 0) || (valuelen < 0))
		icmsghdrp->status = HV_E_FAIL;

	kvp_data->value_type = REG_SZ; /* all our values are strings */

response_done:
	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
				VM_PKT_DATA_INBAND, 0);

	/* Kick the channel so the host sees the response promptly. */
	poll_channel(channel);
}
/**
 * \brief Wait for (and return) the next event on the given waitset,
 *        optionally printing a debug call trace when polling yields.
 *
 * Alternates between two regimes: a polling loop that runs with the
 * dispatcher ENABLED, and state machinery (pending-event check, thread
 * block/unblock) that runs with the dispatcher DISABLED. Blocks the
 * calling thread if there is nothing to poll and nothing pending.
 *
 * \param ws         Waitset to wait on (must be non-NULL)
 * \param retclosure Out: storage for the returned event closure
 * \param debug      If true, print a return-address backtrace each time
 *                   the polling budget expires and the thread yields
 *                   (suppressed for the "netd" dispatcher)
 *
 * \returns SYS_ERR_OK with *retclosure filled in
 */
static errval_t get_next_event_debug(struct waitset *ws,
        struct event_closure *retclosure, bool debug)
{
    struct waitset_chanstate *chan;
    bool was_polling = false;
    cycles_t pollcycles;

    assert(ws != NULL);
    assert(retclosure != NULL);

    // unconditionally disable ourselves and check for events
    // if we decide we have to start polling, we'll jump back up here
    goto check_for_events;

    /* ------------ POLLING LOOP; RUNS WHILE ENABLED ------------ */
polling_loop:
    was_polling = true;
    assert(ws->polling); // this thread is polling
    // get the amount of cycles we want to poll for
    pollcycles = pollcycles_reset();

    // while there are no pending events, poll channels
    while (ws->polled != NULL && ws->pending == NULL) {
        struct waitset_chanstate *nextchan = NULL;
        // NB: Polling policy is to return as soon as a pending event
        // appears, not bother looking at the rest of the polling queue
        for (chan = ws->polled;
             chan != NULL && chan->waitset == ws
                 && chan->state == CHAN_POLLED && ws->pending == NULL;
             chan = nextchan) {
            // save the successor first: poll_channel may unlink chan
            nextchan = chan->next;
            poll_channel(chan);

            // update pollcycles
            pollcycles = pollcycles_update(pollcycles);
            // yield the thread if we exceed the cycle count limit
            if (ws->pending == NULL && pollcycles_expired(pollcycles)) {
                if (debug) {
                    if (strcmp(disp_name(), "netd") != 0) {
                        // Print the callback trace so that we know which call is leading
                        // the schedule removal and
                        printf("%s: callstack: %p %p %p %p\n", disp_name(),
                               __builtin_return_address(0),
                               __builtin_return_address(1),
                               __builtin_return_address(2),
                               __builtin_return_address(3));
                    }
                }
                thread_yield();
                pollcycles = pollcycles_reset();
            }
        }

        // ensure that we restart polling from the place we left off here,
        // if the next channel is a valid one
        if (nextchan != NULL && nextchan->waitset == ws
            && nextchan->state == CHAN_POLLED) {
            ws->polled = nextchan;
        }
    }

    /* ------------ STATE MACHINERY; RUNS WHILE DISABLED ------------ */
check_for_events: ;
    dispatcher_handle_t handle = disp_disable();

    // are there any pending events on the waitset?
    chan = get_pending_event_disabled(ws);
    if (chan != NULL) {
        // if we need to poll, and we have a blocked thread, wake it up to do so
        if (was_polling && ws->polled != NULL && ws->waiting_threads != NULL) {
            // start a blocked thread polling
            struct thread *t;
            t = thread_unblock_one_disabled(handle, &ws->waiting_threads, NULL);
            assert_disabled(t == NULL); // shouldn't see a remote thread
        } else if (was_polling) {
            // I'm stopping polling, and there is nobody else
            assert_disabled(ws->polling);
            ws->polling = false;
        }
        disp_enable(handle);

        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }

    // If we got here and there are channels to poll but no-one is polling,
    // then either we never polled, or we lost a race on the channel we picked.
    // Either way, we'd better start polling again.
    if (ws->polled != NULL && (was_polling || !ws->polling)) {
        if (!was_polling) {
            ws->polling = true;
        }
        disp_enable(handle);
        goto polling_loop;
    }

    // otherwise block awaiting an event
    // (thread_block_disabled re-enables the dispatcher before sleeping)
    chan = thread_block_disabled(handle, &ws->waiting_threads);

    if (chan == NULL) {
        // not a real event, just a wakeup to get us to start polling!
        assert(ws->polling);
        goto polling_loop;
    } else {
        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }
}