Example 1
/*
 * These carefully handle interactions with other cpus and return
 * the original vpte.  Clearing VPTE_RW prevents us from racing the
 * setting of VPTE_M, allowing us to invalidate the tlb (the real cpu's
 * pmap) and get good status for VPTE_M.
 *
 * When messing with page directory entries we have to clear the cpu
 * mask to force a reload of the kernel's page table mapping cache.
 *
 * clean: clear VPTE_M and VPTE_RW
 * setro: clear VPTE_RW
 * load&clear: clear entire field
 */
vpte_t
pmap_clean_pte(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
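		/*
		 * Revoke write permission first so the real cpu cannot
		 * set VPTE_M behind our back.
		 */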
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, PAGE_SIZE);
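		/*
		 * With writes blocked and the tlb invalidated, re-read the
		 * vpte so the returned value carries any VPTE_M that was
		 * set earlier, then clear RW and M in the live entry.
		 */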
		pte = *ptep;
		atomic_clear_long(ptep, VPTE_RW|VPTE_M);
	}
	return(pte);
}
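
The header comment also lists a setro operation (clear VPTE_RW only). A minimal sketch of such a helper, following the same pattern and assuming the same pmap_inval_cpu() primitive, might look like the following; this is an illustration, not necessarily the actual DragonFly implementation.

vpte_t
pmap_setro_pte(volatile vpte_t *ptep, struct pmap *pmap, vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
		/* Drop write permission, flush, then pick up a raced M bit */
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, PAGE_SIZE);
		pte |= *ptep & VPTE_M;
	}
	return(pte);
}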
Example 2
/*
 * Net VSC on send completion
 */
static void
hv_nv_on_send_completion(netvsc_dev *net_dev, struct vmbus_channel *chan,
    const struct vmbus_chanpkt_hdr *pkt)
{
	const nvsp_msg *nvsp_msg_pkt;
	netvsc_packet *net_vsc_pkt;

	nvsp_msg_pkt = VMBUS_CHANPKT_CONST_DATA(pkt);

	if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_type_init_complete
		|| nvsp_msg_pkt->hdr.msg_type
			== nvsp_msg_1_type_send_rx_buf_complete
		|| nvsp_msg_pkt->hdr.msg_type
			== nvsp_msg_1_type_send_send_buf_complete
		|| nvsp_msg_pkt->hdr.msg_type
			== nvsp_msg5_type_subchannel) {
		/* Copy the response back */
		memcpy(&net_dev->channel_init_packet, nvsp_msg_pkt,
		    sizeof(nvsp_msg));
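		/* Wake the requester blocked on the channel init semaphore */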
		sema_post(&net_dev->channel_init_sema);
	} else if (nvsp_msg_pkt->hdr.msg_type ==
		    nvsp_msg_1_type_send_rndis_pkt_complete) {
		/* Get the send context */
		net_vsc_pkt =
		    (netvsc_packet *)(unsigned long)pkt->cph_xactid;
		if (NULL != net_vsc_pkt) {
			if (net_vsc_pkt->send_buf_section_idx !=
			    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
				u_long mask;
				int idx;

				idx = net_vsc_pkt->send_buf_section_idx /
				    BITS_PER_LONG;
				KASSERT(idx < net_dev->bitsmap_words,
				    ("invalid section index %u",
				     net_vsc_pkt->send_buf_section_idx));
				mask = 1UL <<
				    (net_vsc_pkt->send_buf_section_idx %
				     BITS_PER_LONG);

				KASSERT(net_dev->send_section_bitsmap[idx] &
				    mask,
				    ("index bitmap 0x%lx, section index %u, "
				     "bitmap idx %d, bitmask 0x%lx",
				     net_dev->send_section_bitsmap[idx],
				     net_vsc_pkt->send_buf_section_idx,
				     idx, mask));
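				/*
				 * Return the send buffer section to the
				 * free bitmap.
				 */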
				atomic_clear_long(
				    &net_dev->send_section_bitsmap[idx], mask);
			}
			
			/* Notify the layer above us */
			net_vsc_pkt->compl.send.on_send_completion(chan,
			    net_vsc_pkt->compl.send.send_completion_context);

		}
	}
}
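
The index/mask arithmetic above is the standard pattern for a bitmap stored in an array of longs. A minimal standalone sketch (hypothetical helper and macro names, not part of the driver) showing how a section index maps to an array slot and a bit within it:

#include <limits.h>

#define EX_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical helper: map a section index to a bitmap word and bit mask. */
static void
ex_section_to_bit(unsigned int section, int *idxp, unsigned long *maskp)
{
	*idxp = section / EX_BITS_PER_LONG;		/* which long in the array */
	*maskp = 1UL << (section % EX_BITS_PER_LONG);	/* which bit in that long */
}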
Example 3
/*
 * This is a combination of pmap_inval_pte() and pmap_clean_pte().
 * First prevent races with the 'A' and 'M' bits, then clean out
 * the tlb (the real cpu's pmap), then incorporate any races that
 * may have occurred in the meantime, and finally zero out the pte.
 */
vpte_t
pmap_inval_loadandclear(volatile vpte_t *ptep, struct pmap *pmap,
			vm_offset_t va)
{
	vpte_t pte;

	pte = *ptep;
	if (pte & VPTE_V) {
		pte = *ptep;
		atomic_clear_long(ptep, VPTE_RW);
		pmap_inval_cpu(pmap, va, PAGE_SIZE);
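		/*
		 * Fold in any A/M bits that raced in before the
		 * invalidation took effect.
		 */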
		pte |= *ptep & (VPTE_A | VPTE_M);
	}
	*ptep = 0;
	return(pte);
}
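
A hypothetical caller sketch (not from the original source) showing how the returned vpte can be used to propagate modified/referenced state back to the vm_page; vm_page_dirty(), vm_page_flag_set() and PG_REFERENCED are assumed to be the usual DragonFly VM helpers.

/* Hypothetical example: tear down one user mapping of page m at va. */
static void
example_remove_mapping(struct pmap *pmap, volatile vpte_t *ptep,
    vm_offset_t va, vm_page_t m)
{
	vpte_t pte;

	pte = pmap_inval_loadandclear(ptep, pmap, va);
	if (pte & VPTE_M)
		vm_page_dirty(m);	/* page was modified while mapped */
	if (pte & VPTE_A)
		vm_page_flag_set(m, PG_REFERENCED);
}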