/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that
	 * possibility. Disabling interrupts to avoid incoming
	 * datagrams during a "rep out" and possibly landing up in
	 * this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
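A minimal sketch of a caller, assuming the vmci_datagram layout, the vmci_make_handle() helper, and the VMCI_ANON_SRC_HANDLE constant from include/linux/vmw_vmci_defs.h; the wrapper name and handle values here are hypothetical.

/*
 * Hypothetical wrapper: send a header-only datagram to a given
 * context/resource pair. The handle helpers are assumed to match
 * include/linux/vmw_vmci_defs.h.
 */
static int vmci_send_empty_dg(u32 context, u32 resource)
{
	struct vmci_datagram dg;

	dg.dst = vmci_make_handle(context, resource);
	dg.src = VMCI_ANON_SRC_HANDLE;
	dg.payload_size = 0;	/* VMCI_DG_SIZE() then covers the header only */

	return vmci_send_datagram(&dg);
}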
static void oxnas_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);

	/* Stream the whole buffer to the controller's data window byte by byte. */
	iowrite8_rep(oxnas->io_base, buf, len);
}
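This helper is meant to be installed as the chip's buffer-write hook; a sketch, assuming the legacy raw NAND callback field (chip->write_buf in older kernels, chip->legacy.write_buf after the ops split):

	/* Hypothetical probe excerpt: register the helper with the NAND core. */
	chip->write_buf = oxnas_nand_write_buf;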
void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
			       size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);

	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);	/* count is in bytes, so it must be even */
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);	/* count must be a multiple of 4 bytes */
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
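An illustrative caller; note that count is always in bytes regardless of reg_width, since the wider cases shift it down to an element count before the rep write. The core pointer, buffer contents, and offset below are placeholders:

	u8 fw_chunk[16];	/* hypothetical payload */

	bcma_host_pci_block_write(core, fw_chunk, sizeof(fw_chunk),
				  0x100 /* hypothetical offset */, sizeof(u8));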
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	/* 8-bit bus variant: replay the buffer as single-byte writes. */
	iowrite8_rep(reg, data, count);
}
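The dm9000 driver picks its block-transfer helpers once at probe time based on the bus width; a sketch of that selection, where the board_info pointer (db) and the outblk field name are assumptions about the driver's internal structure:

	/* Hypothetical probe-time selection by I/O width. */
	switch (iosize) {
	case 1:
		db->outblk = dm9000_outblk_8bit;	/* assumed field name */
		break;
	/* 16- and 32-bit buses would select wider variants here */
	}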
void outsb(unsigned long port, const void *src, unsigned long count)
{
	/* Map a single legacy port, then emit count bytes to it. */
	iowrite8_rep(ioport_map(port, 1), src, count);
}
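Usage mirrors the classic x86 outsb instruction: stream a buffer to one I/O port, one byte per write. The port number below is illustrative only:

	const u8 cmd[4] = { 0x01, 0x02, 0x03, 0x04 };

	outsb(0x378, cmd, sizeof(cmd));	/* 0x378: hypothetical legacy port */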