Example no. 1
0
/*
 * d_open handler for the hv_kvp character device.
 *
 * Grants exclusive access to a single user-land KVP daemon: remembers the
 * opening process in daemon_task and marks the device as accessed.  A second
 * open while the device is held fails with EBUSY.
 *
 * Note: FreeBSD cdevsw open handlers must return a positive errno value;
 * the previous code returned -EBUSY (a Linux convention), which devfs does
 * not interpret as an error code.
 */
static int
hv_kvp_dev_open(struct cdev *dev, int oflags, int devtype,
				struct thread *td)
{

	hv_kvp_log_info("%s: Opened device \"hv_kvp_device\" successfully.\n", __func__);
	/* Only one daemon may hold the device at a time. */
	if (kvp_globals.dev_accessed)
		return (EBUSY);

	/* Record the daemon process; it starts out idle. */
	daemon_task = curproc;
	kvp_globals.dev_accessed = true;
	kvp_globals.daemon_busy = false;
	return (0);
}
Example no. 2
0
/*
 * Callback routine that gets called whenever there is a message from host
 */
static void
hv_kvp_callback(void *context)
{
	hv_kvp_sc *sc = (hv_kvp_sc*)context;
	/*
	 * The first request from the host will not be handled until the
	 * daemon is registered.  When this callback is triggered without a
	 * registered daemon, it just returns.  When a new daemon gets
	 * registered, this callback is triggered from the _write op.
	 */
	if (sc->register_done) {
		hv_kvp_log_info("%s: Queuing work item\n", __func__);
		taskqueue_enqueue(taskqueue_thread, &sc->task);
	}
}
Example no. 3
0
/*
 * d_open handler for the hv_kvp character device.
 *
 * Grants exclusive access to a single user-land KVP daemon: remembers the
 * opening process in sc->daemon_task and marks the device as accessed.  A
 * second open while the device is held fails with EBUSY.
 *
 * Note: FreeBSD cdevsw open handlers must return a positive errno value;
 * the previous code returned -EBUSY (a Linux convention), which devfs does
 * not interpret as an error code.
 */
static int
hv_kvp_dev_open(struct cdev *dev, int oflags, int devtype,
				struct thread *td)
{
	hv_kvp_sc *sc = (hv_kvp_sc*)dev->si_drv1;

	hv_kvp_log_info("%s: Opened device \"hv_kvp_device\" successfully.\n", __func__);
	/* Only one daemon may hold the device at a time. */
	if (sc->dev_accessed)
		return (EBUSY);

	/* Record the daemon process; it starts out idle. */
	sc->daemon_task = curproc;
	sc->dev_accessed = true;
	sc->daemon_busy = false;
	return (0);
}
Example no. 4
0
/*
 * Send the response back to the host.
 *
 * Stamps the ICMSG header inside sc->rcv_buf with the given status and the
 * transaction/response flags, then ships the buffer back over the VMBus
 * channel using the message length and id captured from the host request.
 */
static void
hv_kvp_respond_host(hv_kvp_sc *sc, uint32_t error)
{
	struct hv_vmbus_icmsg_hdr *hdr;

	/* The ICMSG header sits right after the pipe header in the buffer. */
	hdr = (struct hv_vmbus_icmsg_hdr *)
	    &sc->rcv_buf[sizeof(struct hv_vmbus_pipe_hdr)];

	hdr->status = error;
	hdr->icflags = HV_ICMSGHDRFLAG_TRANSACTION |
	    HV_ICMSGHDRFLAG_RESPONSE;

	/* Reuse 'error' for the send result; failure is only logged. */
	error = vmbus_chan_send(vmbus_get_channel(sc->dev),
	    VMBUS_CHANPKT_TYPE_INBAND, 0, sc->rcv_buf, sc->host_msg_len,
	    sc->host_msg_id);
	if (error != 0)
		hv_kvp_log_info("%s: hv_kvp_respond_host: sendpacket error:%d\n",
			__func__, error);
}
Example no. 5
0
/*
 * Send the response back to the host.
 */
static void
hv_kvp_respond_host(int error)
{
	struct hv_vmbus_icmsg_hdr *hv_icmsg_hdrp;

	hv_icmsg_hdrp = (struct hv_vmbus_icmsg_hdr *)
	    &kvp_globals.rcv_buf[sizeof(struct hv_vmbus_pipe_hdr)];

	if (error)
		error = HV_KVP_E_FAIL;

	hv_icmsg_hdrp->status = error;
	hv_icmsg_hdrp->icflags = HV_ICMSGHDRFLAG_TRANSACTION | HV_ICMSGHDRFLAG_RESPONSE;
	
	error = hv_vmbus_channel_send_packet(kvp_globals.channelp,
			kvp_globals.rcv_buf,
			kvp_globals.host_msg_len, kvp_globals.host_msg_id,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);

	if (error)
		hv_kvp_log_info("%s: hv_kvp_respond_host: sendpacket error:%d\n",
			__func__, error);
}
Example no. 6
0
/*
 * Callback routine that gets called whenever there is a message from host
 *
 * Before the daemon registers we only stash the channel pointer.  After
 * registration, each callback bumps the pending-request count under the
 * mutex; only the transition 0 -> 1 schedules the work item, since the
 * worker drains all pending requests itself.
 */
void
hv_kvp_callback(void *context)
{
	uint64_t reqs;

	if (kvp_globals.register_done == false) {
		/* Daemon not registered yet; just remember the channel. */
		kvp_globals.channelp = context;
		return;
	}

	mtx_lock(&kvp_globals.pending_mutex);
	reqs = ++kvp_globals.pending_reqs;
	mtx_unlock(&kvp_globals.pending_mutex);

	/* Only the first outstanding request needs to queue the worker. */
	if (reqs == 1) {
		hv_kvp_log_info("%s: Queuing work item\n", __func__);
		hv_queue_work_item(service_table[HV_KVP].work_queue,
		    hv_kvp_process_request, context);
	}
}
Example no. 7
0
/*
 * Function to read the kvp request buffer from host
 * and interact with daemon
 *
 * Work-queue handler: drains request packets from the VMBus channel.
 * Version-negotiation requests are answered inline (no daemon needed);
 * every other request is forwarded to the user-land daemon and this
 * thread sleeps up to 5 seconds for the reply.  A request still marked
 * in-progress after the wait is failed back to the host with
 * HV_KVP_E_FAIL.
 */
static void
hv_kvp_process_request(void *context)
{
	uint8_t *kvp_buf;
	hv_vmbus_channel *channel = context;
	uint32_t recvlen = 0;
	uint64_t requestid;
	struct hv_vmbus_icmsg_hdr *icmsghdrp;
	int ret = 0;
	/* Start at 1 so the loop body runs at least once (see comment below). */
	uint64_t pending_cnt = 1;
	
	hv_kvp_log_info("%s: entering hv_kvp_process_request\n", __func__);
	kvp_buf = receive_buffer[HV_KVP];
	ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
		&recvlen, &requestid);

	/*
	 * We start counting only after the daemon registers
	 * and therefore there could be requests pending in 
	 * the VMBus that are not reflected in pending_cnt.
	 * Therefore we continue reading as long as either of
	 * the below conditions is true.
	 */

	while ((pending_cnt>0) || ((ret == 0) && (recvlen > 0))) {

		if ((ret == 0) && (recvlen>0)) {
			
			/* The ICMSG header follows the pipe header in the buffer. */
			icmsghdrp = (struct hv_vmbus_icmsg_hdr *)
					&kvp_buf[sizeof(struct hv_vmbus_pipe_hdr)];
	
			hv_kvp_transaction_init(recvlen, channel, requestid, kvp_buf);
			if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
				/* Negotiation is answered without involving the daemon. */
				hv_kvp_negotiate_version(icmsghdrp, NULL, kvp_buf);
				hv_kvp_respond_host(ret);
					
				/*
				 * It is ok to not acquire the mutex before setting 
				 * req_in_progress here because negotiation is the
				 * first thing that happens and hence there is no
				 * chance of a race condition.
				 */
				
				kvp_globals.req_in_progress = false;
				hv_kvp_log_info("%s :version negotiated\n", __func__);

			} else {
				/* Only issue a new query if the daemon is idle. */
				if (!kvp_globals.daemon_busy) {

					hv_kvp_log_info("%s: issuing qury to daemon\n", __func__);
					mtx_lock(&kvp_globals.pending_mutex);
					kvp_globals.req_timed_out = false;
					kvp_globals.daemon_busy = true;
					mtx_unlock(&kvp_globals.pending_mutex);

					hv_kvp_send_msg_to_daemon();
					hv_kvp_log_info("%s: waiting for daemon\n", __func__);
				}
				
				/* Wait 5 seconds for daemon to respond back */
				tsleep(&kvp_globals, 0, "kvpworkitem", 5 * hz);
				hv_kvp_log_info("%s: came out of wait\n", __func__);
			}
		}

		mtx_lock(&kvp_globals.pending_mutex);
		
		/* Notice that once req_timed_out is set to true
		 * it will remain true until the next request is
		 * sent to the daemon. The response from daemon
		 * is forwarded to host only when this flag is 
		 * false. 
		 */
		kvp_globals.req_timed_out = true;

		/*
		 * Cancel request if so need be.
		 */
		if (hv_kvp_req_in_progress()) {
			hv_kvp_log_info("%s: request was still active after wait so failing\n", __func__);
			hv_kvp_respond_host(HV_KVP_E_FAIL);
			kvp_globals.req_in_progress = false;	
		}
	
		/*
		 * Decrement the pending request count (never below zero).
		 */
		if (kvp_globals.pending_reqs>0) {
			kvp_globals.pending_reqs = kvp_globals.pending_reqs - 1;
		}
		pending_cnt = kvp_globals.pending_reqs;
		
		mtx_unlock(&kvp_globals.pending_mutex);

		/*
		 * Try reading next buffer
		 */
		recvlen = 0;
		ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
			&recvlen, &requestid);
		hv_kvp_log_info("%s: read: context %p, pending_cnt %ju ret =%d, recvlen=%d\n",
			__func__, context, pending_cnt, ret, recvlen);
	} 
}
Example no. 8
0
/*
 * Prepare a user kvp msg based on host kvp msg (utf16 to utf8)
 * Ensure utf16_utf8 takes care of the additional string terminating char!!
 *
 * Copies the operation and pool fields verbatim from the host message
 * (kvp_globals.host_kvp_msg) into the daemon message
 * (kvp_globals.daemon_kvp_msg) and converts each operation's string
 * payload from UTF-16 to UTF-8.
 */
static void
hv_kvp_convert_hostmsg_to_usermsg(void)
{
	int utf_err = 0;
	uint32_t value_type;
	/* IP messages reuse the same buffer with a different layout. */
	struct hv_kvp_ip_msg *host_ip_msg = (struct hv_kvp_ip_msg *)
		kvp_globals.host_kvp_msg;

	struct hv_kvp_msg *hmsg = kvp_globals.host_kvp_msg;
	struct hv_kvp_msg *umsg = &kvp_globals.daemon_kvp_msg;

	/* Start from a clean message so unused fields are zero. */
	memset(umsg, 0, sizeof(struct hv_kvp_msg));

	umsg->kvp_hdr.operation = hmsg->kvp_hdr.operation;
	umsg->kvp_hdr.pool = hmsg->kvp_hdr.pool;

	switch (umsg->kvp_hdr.operation) {
	case HV_KVP_OP_SET_IP_INFO:
		hv_kvp_convert_utf16_ipinfo_to_utf8(host_ip_msg, umsg);
		break;

	case HV_KVP_OP_GET_IP_INFO:
		/* Only the adapter id needs conversion for a GET. */
		utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id,
		    MAX_ADAPTER_ID_SIZE,
		    (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
		    MAX_ADAPTER_ID_SIZE, 1, &utf_err);

		umsg->body.kvp_ip_val.addr_family =
		    host_ip_msg->kvp_ip_val.addr_family;
		break;

	case HV_KVP_OP_SET:
		value_type = hmsg->body.kvp_set.data.value_type;

		/* Convert the value according to its registry type. */
		switch (value_type) {
		case HV_REG_SZ:
			umsg->body.kvp_set.data.value_size =
			    utf16_to_utf8(
				(char *)umsg->body.kvp_set.data.msg_value.value,
				HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1,
				(uint16_t *)hmsg->body.kvp_set.data.msg_value.value,
				hmsg->body.kvp_set.data.value_size,
				1, &utf_err);
			/* utf8 encoding */
			/* NOTE(review): size halved — utf16_to_utf8 presumably
			 * reports a UTF-16-based byte count; confirm against
			 * its implementation. */
			umsg->body.kvp_set.data.value_size =
			    umsg->body.kvp_set.data.value_size / 2;
			break;

		case HV_REG_U32:
			/* Render integer values as decimal strings (+1 for NUL). */
			umsg->body.kvp_set.data.value_size =
			    sprintf(umsg->body.kvp_set.data.msg_value.value, "%d",
				hmsg->body.kvp_set.data.msg_value.value_u32) + 1;
			break;

		case HV_REG_U64:
			umsg->body.kvp_set.data.value_size =
			    sprintf(umsg->body.kvp_set.data.msg_value.value, "%llu",
				(unsigned long long)
				hmsg->body.kvp_set.data.msg_value.value_u64) + 1;
			break;
		}

		/* The key is always converted, regardless of value type. */
		umsg->body.kvp_set.data.key_size =
		    utf16_to_utf8(
			umsg->body.kvp_set.data.key,
			HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1,
			(uint16_t *)hmsg->body.kvp_set.data.key,
			hmsg->body.kvp_set.data.key_size,
			1, &utf_err);

		/* utf8 encoding */
		umsg->body.kvp_set.data.key_size =
		    umsg->body.kvp_set.data.key_size / 2;
		break;

	case HV_KVP_OP_GET:
		umsg->body.kvp_get.data.key_size =
		    utf16_to_utf8(umsg->body.kvp_get.data.key,
			HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1,
			(uint16_t *)hmsg->body.kvp_get.data.key,
			hmsg->body.kvp_get.data.key_size,
			1, &utf_err);
		/* utf8 encoding */
		umsg->body.kvp_get.data.key_size =
		    umsg->body.kvp_get.data.key_size / 2;
		break;

	case HV_KVP_OP_DELETE:
		umsg->body.kvp_delete.key_size =
		    utf16_to_utf8(umsg->body.kvp_delete.key,
			HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1,
			(uint16_t *)hmsg->body.kvp_delete.key,
			hmsg->body.kvp_delete.key_size,
			1, &utf_err);
		/* utf8 encoding */
		umsg->body.kvp_delete.key_size =
		    umsg->body.kvp_delete.key_size / 2;
		break;

	case HV_KVP_OP_ENUMERATE:
		/* Enumeration only carries an index; nothing to convert. */
		umsg->body.kvp_enum_data.index =
		    hmsg->body.kvp_enum_data.index;
		break;

	default:
		/* Unknown operation: log it and leave the zeroed message. */
		hv_kvp_log_info("%s: daemon_kvp_msg: Invalid operation : %d\n",
		    __func__, umsg->kvp_hdr.operation);
	}
}
Example no. 9
0
/*
 * Function to read the kvp request buffer from host
 * and interact with daemon
 *
 * Taskqueue handler: drains request packets from the device's VMBus
 * channel.  Version-negotiation requests are answered inline; every
 * other request is forwarded to the user-land daemon and this thread
 * sleeps up to 5 seconds for the reply.  A request still marked
 * in-progress after the wait is failed back with HV_KVP_E_FAIL.
 */
static void
hv_kvp_process_request(void *context, int pending)
{
	uint8_t *kvp_buf;
	hv_vmbus_channel *channel;
	uint32_t recvlen = 0;
	uint64_t requestid;
	struct hv_vmbus_icmsg_hdr *icmsghdrp;
	int ret = 0;
	hv_kvp_sc		*sc;

	hv_kvp_log_info("%s: entering hv_kvp_process_request\n", __func__);

	/* The task context is the per-device softc. */
	sc = (hv_kvp_sc*)context;
	kvp_buf = sc->util_sc.receive_buffer;
	channel = sc->util_sc.hv_dev->channel;

	ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
		&recvlen, &requestid);

	/* Loop until the channel has no more complete packets. */
	while ((ret == 0) && (recvlen > 0)) {

		/* The ICMSG header follows the pipe header in the buffer. */
		icmsghdrp = (struct hv_vmbus_icmsg_hdr *)
			&kvp_buf[sizeof(struct hv_vmbus_pipe_hdr)];

		hv_kvp_transaction_init(sc, recvlen, requestid, kvp_buf);
		if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
			/* Negotiation is answered without involving the daemon. */
			hv_kvp_negotiate_version(icmsghdrp, NULL, kvp_buf);
			hv_kvp_respond_host(sc, ret);

			/*
			 * It is ok to not acquire the mutex before setting
			 * req_in_progress here because negotiation is the
			 * first thing that happens and hence there is no
			 * chance of a race condition.
			 */

			sc->req_in_progress = false;
			hv_kvp_log_info("%s :version negotiated\n", __func__);

		} else {
			/* Only issue a new query if the daemon is idle. */
			if (!sc->daemon_busy) {

				hv_kvp_log_info("%s: issuing qury to daemon\n", __func__);
				mtx_lock(&sc->pending_mutex);
				sc->req_timed_out = false;
				sc->daemon_busy = true;
				mtx_unlock(&sc->pending_mutex);

				hv_kvp_send_msg_to_daemon(sc);
				hv_kvp_log_info("%s: waiting for daemon\n", __func__);
			}

			/* Wait 5 seconds for daemon to respond back */
			tsleep(sc, 0, "kvpworkitem", 5 * hz);
			hv_kvp_log_info("%s: came out of wait\n", __func__);
		}

		mtx_lock(&sc->pending_mutex);

		/* Notice that once req_timed_out is set to true
		 * it will remain true until the next request is
		 * sent to the daemon. The response from daemon
		 * is forwarded to host only when this flag is
		 * false.
		 */
		sc->req_timed_out = true;

		/*
		 * Cancel request if so need be.
		 */
		if (hv_kvp_req_in_progress(sc)) {
			hv_kvp_log_info("%s: request was still active after wait so failing\n", __func__);
			hv_kvp_respond_host(sc, HV_KVP_E_FAIL);
			sc->req_in_progress = false;
		}

		mtx_unlock(&sc->pending_mutex);

		/*
		 * Try reading next buffer
		 */
		recvlen = 0;
		ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
			&recvlen, &requestid);
		hv_kvp_log_info("%s: read: context %p, ret =%d, recvlen=%d\n",
			__func__, context, ret, recvlen);
	}
}
Example no. 10
0
/*
 * Function to read the kvp request buffer from host
 * and interact with daemon
 *
 * Taskqueue handler: drains request packets from the device's VMBus
 * channel via vmbus_chan_recv().  Version negotiation is handled by
 * vmbus_ic_negomsg() and answered inline; every other request is
 * forwarded to the user-land daemon and this thread sleeps up to
 * 5 seconds for the reply.  A request still marked in-progress after
 * the wait is failed back with HV_E_FAIL.
 */
static void
hv_kvp_process_request(void *context, int pending)
{
	uint8_t *kvp_buf;
	struct vmbus_channel *channel;
	uint32_t recvlen = 0;
	uint64_t requestid;
	struct hv_vmbus_icmsg_hdr *icmsghdrp;
	int ret = 0, error;
	hv_kvp_sc *sc;

	hv_kvp_log_info("%s: entering hv_kvp_process_request\n", __func__);

	/* The task context is the per-device softc. */
	sc = (hv_kvp_sc*)context;
	kvp_buf = sc->util_sc.ic_buf;
	channel = vmbus_get_channel(sc->dev);

	recvlen = sc->util_sc.ic_buflen;
	ret = vmbus_chan_recv(channel, kvp_buf, &recvlen, &requestid);
	KASSERT(ret != ENOBUFS, ("hvkvp recvbuf is not large enough"));
	/* XXX check recvlen to make sure that it contains enough data */

	/* Loop until the channel has no more complete packets. */
	while ((ret == 0) && (recvlen > 0)) {
		/* The ICMSG header follows the pipe header in the buffer. */
		icmsghdrp = (struct hv_vmbus_icmsg_hdr *)
		    &kvp_buf[sizeof(struct hv_vmbus_pipe_hdr)];

		hv_kvp_transaction_init(sc, recvlen, requestid, kvp_buf);
		if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
			/* Negotiation is answered without involving the daemon. */
			error = vmbus_ic_negomsg(&sc->util_sc,
			    kvp_buf, &recvlen, KVP_FWVER, KVP_MSGVER);
			/* XXX handle vmbus_ic_negomsg failure. */
			if (!error)
				hv_kvp_respond_host(sc, HV_S_OK);
			else
				hv_kvp_respond_host(sc, HV_E_FAIL);
			/*
			 * It is ok to not acquire the mutex before setting
			 * req_in_progress here because negotiation is the
			 * first thing that happens and hence there is no
			 * chance of a race condition.
			 */

			sc->req_in_progress = false;
			hv_kvp_log_info("%s :version negotiated\n", __func__);

		} else {
			/* Only issue a new query if the daemon is idle. */
			if (!sc->daemon_busy) {

				hv_kvp_log_info("%s: issuing qury to daemon\n", __func__);
				mtx_lock(&sc->pending_mutex);
				sc->req_timed_out = false;
				sc->daemon_busy = true;
				mtx_unlock(&sc->pending_mutex);

				hv_kvp_send_msg_to_daemon(sc);
				hv_kvp_log_info("%s: waiting for daemon\n", __func__);
			}

			/* Wait 5 seconds for daemon to respond back */
			tsleep(sc, 0, "kvpworkitem", 5 * hz);
			hv_kvp_log_info("%s: came out of wait\n", __func__);
		}

		mtx_lock(&sc->pending_mutex);

		/* Notice that once req_timed_out is set to true
		 * it will remain true until the next request is
		 * sent to the daemon. The response from daemon
		 * is forwarded to host only when this flag is
		 * false.
		 */
		sc->req_timed_out = true;

		/*
		 * Cancel request if so need be.
		 */
		if (hv_kvp_req_in_progress(sc)) {
			hv_kvp_log_info("%s: request was still active after wait so failing\n", __func__);
			hv_kvp_respond_host(sc, HV_E_FAIL);
			sc->req_in_progress = false;
		}

		mtx_unlock(&sc->pending_mutex);

		/*
		 * Try reading next buffer
		 */
		recvlen = sc->util_sc.ic_buflen;
		ret = vmbus_chan_recv(channel, kvp_buf, &recvlen, &requestid);
		KASSERT(ret != ENOBUFS, ("hvkvp recvbuf is not large enough"));
		/* XXX check recvlen to make sure that it contains enough data */

		hv_kvp_log_info("%s: read: context %p, ret =%d, recvlen=%d\n",
			__func__, context, ret, recvlen);
	}
}