Example #1
int bcm3450_read_reg(u8 offset)
{
    struct i2c_msg msg[2];
    int val;
    struct i2c_client *client = &pclient_data->client;

    BCM_LOG_DEBUG(BCM_LOG_ID_I2C, "Entering the function %s \n", __FUNCTION__);

    if(check_offset(offset, DWORD_ALIGN)) {
        return -EINVAL;
    }

    /* BCM3450 requires the offset to be the register number */
    offset = offset/4;

    msg[0].addr = msg[1].addr = client->addr;
    msg[0].flags = msg[1].flags = client->flags & I2C_M_TEN;

    msg[0].len = 1;
    msg[0].buf = (char *)&offset;

    msg[1].flags |= I2C_M_RD;
    msg[1].len = 4;
    msg[1].buf = (char *)&val;

    /* On I2C bus, we receive LS byte first. So swap bytes as necessary */
    if(i2c_transfer(client->adapter, msg, 2) == 2)
        return swab32(val);

    return -1;
}
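Most of the examples on this page hinge on the same primitive: swab32() unconditionally reverses the byte order of a 32-bit value, which is what undoes the LS-byte-first ordering noted in the comment above. For reference, here is a minimal portable sketch of an equivalent helper under the hypothetical name my_swab32() (in the kernel, swab32() itself comes from <linux/swab.h>):

#include <stdint.h>

/* Hypothetical portable equivalent of swab32(): reverse the four
 * bytes of a 32-bit value, e.g. 0x11223344 -> 0x44332211. */
static inline uint32_t my_swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}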
Example #2
static inline void PBKDF2_SHA256_80_128_4way(const uint32_t *tstate,
	const uint32_t *ostate, const uint32_t *salt, uint32_t *output)
{
	uint32_t istate[4 * 8] __attribute__((aligned(16)));
	uint32_t ostate2[4 * 8] __attribute__((aligned(16)));
	uint32_t ibuf[4 * 16] __attribute__((aligned(16)));
	uint32_t obuf[4 * 16] __attribute__((aligned(16)));
	int i, j;

	memcpy(istate, tstate, 4 * 32);
	sha256_transform_4way(istate, salt, 0);
	
	memcpy(ibuf, salt + 4 * 16, 4 * 16);
	memcpy(ibuf + 4 * 5, innerpad_4way, 4 * 44);
	memcpy(obuf + 4 * 8, outerpad_4way, 4 * 32);

	for (i = 0; i < 4; i++) {
		memcpy(obuf, istate, 4 * 32);
		ibuf[4 * 4 + 0] = i + 1;
		ibuf[4 * 4 + 1] = i + 1;
		ibuf[4 * 4 + 2] = i + 1;
		ibuf[4 * 4 + 3] = i + 1;
		sha256_transform_4way(obuf, ibuf, 0);

		memcpy(ostate2, ostate, 4 * 32);
		sha256_transform_4way(ostate2, obuf, 0);
		for (j = 0; j < 4 * 8; j++)
			output[4 * 8 * i + j] = swab32(ostate2[j]);
	}
}
Example #3
static inline void PBKDF2_SHA256_80_128(const uint32_t *tstate,
	const uint32_t *ostate, const uint32_t *salt, uint32_t *output)
{
	uint32_t istate[8], ostate2[8];
	uint32_t ibuf[16], obuf[16];
	int i, j;

	memcpy(istate, tstate, 32);
	sha256_transform(istate, salt, 0);
	
	memcpy(ibuf, salt + 16, 16);
	memcpy(ibuf + 5, innerpad, 44);
	memcpy(obuf + 8, outerpad, 32);

	for (i = 0; i < 4; i++) {
		memcpy(obuf, istate, 32);
		ibuf[4] = i + 1;
		sha256_transform(obuf, ibuf, 0);

		memcpy(ostate2, ostate, 32);
		sha256_transform(ostate2, obuf, 0);
		for (j = 0; j < 8; j++)
			output[8 * i + j] = swab32(ostate2[j]);
	}
}
Example #4
static inline void swap32yes(void *out, const void *in, size_t sz)
{
	size_t swapcounter;

	for (swapcounter = 0; swapcounter < sz; ++swapcounter)
		(((uint32_t*)out)[swapcounter]) = swab32(((uint32_t*)in)[swapcounter]);
}
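Note that swap32yes() takes its length in 32-bit words, not bytes, which is easy to get wrong at the call site. A hypothetical usage sketch (the 20-word / 80-byte header size is only an illustration):

#include <stdint.h>
#include <string.h>

void swap_header_example(void)
{
	uint32_t header_le[20], header_be[20];   /* 80-byte block header */

	memset(header_le, 0, sizeof(header_le));
	/* sz is a word count: 20 words == 80 bytes */
	swap32yes(header_be, header_le, 20);
}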
Example #5
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}
Example #6
static void handle_catas(struct mthca_dev *dev)
{
	struct ib_event event;
	const char *type;
	int i;

	event.device = &dev->ib_dev;
	event.event  = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;

	ib_dispatch_event(&event);

	switch (swab32(readl(dev->catas_err.map)) >> 24) {
	case MTHCA_CATAS_TYPE_INTERNAL:
		type = "internal error";
		break;
	case MTHCA_CATAS_TYPE_UPLINK:
		type = "uplink bus error";
		break;
	case MTHCA_CATAS_TYPE_DDR:
		type = "DDR data error";
		break;
	case MTHCA_CATAS_TYPE_PARITY:
		type = "internal parity error";
		break;
	default:
		type = "unknown error";
		break;
	}

	mthca_err(dev, "Catastrophic error detected: %s\n", type);
	for (i = 0; i < dev->catas_err.size; ++i)
		mthca_err(dev, "  buf[%02x]: %08x\n",
			  i, swab32(readl(dev->catas_err.map + i)));
}
Example #7
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
	if (dev->swab)
		b = swab32(b);

	writel(b, dev->base + offset);
}
Example #8
/* Write Function of PROCFS attribute "/proc/b3450_reg" */
static ssize_t b3450_proc_write(struct file *file, const char __user *buffer,
                                  unsigned long count, void *data)
{
    int value, ret_val;
    int offset = (int)file->f_pos;

    /* Only 4-Byte writes are supported */
    if (count != REGISTER_LENGTH) {
        BCM_LOG_NOTICE(BCM_LOG_ID_I2C, "Only 4 Byte writes are supported \n");
        return -EINVAL;
    }

    BCM_LOG_DEBUG(BCM_LOG_ID_I2C, "The offset is %d; the count is %ld \n", 
                  offset, count);

#ifdef __LITTLE_ENDIAN
    value = *((int *)buffer);
#else
    value = swab32(*((int *)buffer));
#endif

    ret_val = bcm3450_write_reg(offset, value);

    /* If ret_val is less than 0, return ret_val; else return the number
       of bytes written, which is 4 */
    return ((ret_val < 0) ? ret_val : REGISTER_LENGTH);
}
Example #9
void swab_work_data( struct work *work )
{
   for ( int i = 0; i <= 18; i++ )
      work->data[i] = swab32( work->data[i] );
   work->data[20] = 0x80000000;
   work->data[31] = 0x00000280;
}
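The two constants stored after the swap are ordinary SHA-256 padding for an 80-byte header: data[20] carries the appended '1' bit and data[31] the message length in bits (80 * 8 = 640 = 0x280). A small self-contained check of that arithmetic, purely for illustration and not part of the miner code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t header_bytes = 80;

	assert(header_bytes * 8 == 0x00000280);      /* length word, data[31] */
	assert((uint32_t)1 << 31 == 0x80000000u);    /* appended '1' bit, data[20] */
	return 0;
}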
Example #10
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_get_reg(vcpu, rd);
		if (__is_be(vcpu)) {
			/* guest pre-swabbed data, undo this for writel() */
			data = swab32(data);
		}
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		if (__is_be(vcpu)) {
			/* guest expects swabbed data */
			data = swab32(data);
		}
		vcpu_set_reg(vcpu, rd, data);
	}

	return 1;
}
Example #11
/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}
Example #12
static int ext2_check_descriptors (struct super_block * sb)
{
	int i;
	int desc_block = 0;
	unsigned long block = swab32(sb->u.ext2_sb.s_es->s_first_data_block);
	struct ext2_group_desc * gdp = NULL;

	ext2_debug ("Checking group descriptors");

	for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++)
	{
		if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
			gdp = (struct ext2_group_desc *) sb->u.ext2_sb.s_group_desc[desc_block++]->b_data;
		if (swab32(gdp->bg_block_bitmap) < block ||
		    swab32(gdp->bg_block_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb))
		{
			ext2_error (sb, "ext2_check_descriptors",
				    "Block bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long) swab32(gdp->bg_block_bitmap));
			return 0;
		}
		if (swab32(gdp->bg_inode_bitmap) < block ||
		    swab32(gdp->bg_inode_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb))
		{
			ext2_error (sb, "ext2_check_descriptors",
				    "Inode bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long) swab32(gdp->bg_inode_bitmap));
			return 0;
		}
		if (swab32(gdp->bg_inode_table) < block ||
		    swab32(gdp->bg_inode_table) + sb->u.ext2_sb.s_itb_per_group >=
		    block + EXT2_BLOCKS_PER_GROUP(sb))
		{
			ext2_error (sb, "ext2_check_descriptors",
				    "Inode table for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long) swab32(gdp->bg_inode_table));
			return 0;
		}
		block += EXT2_BLOCKS_PER_GROUP(sb);
		gdp++;
	}
	return 1;
}
Example #13
static void swap_buffer(u32 *p, u32 len)
{
	while (len) {
		*p = swab32(*p);
		p++;
		len -= 4;
	}
}
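Unlike swap32yes() in Example #4, swap_buffer() takes its length in bytes and assumes it is a multiple of 4; since len is a u32, an unaligned length would wrap the counter and run the loop far past the buffer. A hedged, hypothetical call site:

/* Hypothetical call site: byte-swap a receive buffer in place.
 * sizeof(rxbuf) is 64 bytes, so 16 words get swapped. */
static void swap_rx_example(void)
{
	u32 rxbuf[16] = { 0 };

	swap_buffer(rxbuf, sizeof(rxbuf));
}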
Example #14
unsigned inl(unsigned port)
{
	u32 val;
	u8 *addr = (u8 *)(port + _IO_BASE);

	idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0);
	return swab32(val);
}
Example #15
/* QSPI supports swapping the flash read/write data
 * in hardware on LS102xA, but not on VF610 */
static inline u32 qspi_endian_xchg(u32 data)
{
#ifdef CONFIG_VF610
	return swab32(data);
#else
	return data;
#endif
}
Example #16
File: pci.c Project: 4pao/openwrt
static int amazon_pci_config_access(unsigned char access_type,
	struct pci_bus *bus, unsigned int devfn, unsigned int where, u32 *data)
{
	unsigned long flags;
	u32 pci_addr;
	u32 val;
	int ret;

	/* Amazon supports slots 0 to 15 */
	/* devfn 0 and 0x20 are the SoC itself */
	if ((bus->number != 0) || (devfn > 0x7f) || (devfn == 0) || (devfn == 0x20))
		return 1;

	local_irq_save(flags);

	pci_addr = AMAZON_PCI_CFG_BASE |
		bus->number << AMAZON_PCI_CFG_BUSNUM_SHF |
		devfn << AMAZON_PCI_CFG_FUNNUM_SHF |
		(where & ~0x3);

	if (access_type == PCI_ACCESS_WRITE)
	{
#ifdef CONFIG_SWAP_IO_SPACE
		val = swab32(*data);
#else
		val = *data;
#endif
		ret = put_dbe(val, (u32 *)pci_addr);
	} else {
		ret = get_dbe(val, (u32 *)pci_addr);
#ifdef CONFIG_SWAP_IO_SPACE
		*data = swab32(val);
#else
		*data = val;
#endif
	}

	amazon_writel(amazon_readl(PCI_MODE) & (~(1<<PCI_MODE_cfgok_bit)), PCI_MODE);
	amazon_writel(amazon_readl(STATUS_COMMAND_ADDR), STATUS_COMMAND_ADDR);
	amazon_writel(amazon_readl(PCI_MODE) | (1<<PCI_MODE_cfgok_bit), PCI_MODE);
	mb();
	local_irq_restore(flags);

	if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
		return 1;

	return ret; 
}
Example #17
static void ykd_state_endian_convert (struct ykd_state *ykd_state)
{
	int i;

	ykd_session_endian_convert (&ykd_state->last_primary);
	ykd_state->last_formed_entries = swab32 (ykd_state->last_formed_entries);
	ykd_state->ambiguous_sessions_entries = swab32 (ykd_state->ambiguous_sessions_entries);
	ykd_state->session_id = swab32 (ykd_state->session_id);

	for (i = 0; i < ykd_state->last_formed_entries; i++) {
		ykd_session_endian_convert (&ykd_state->last_formed[i]);
	}
	
	for (i = 0; i < ykd_state->ambiguous_sessions_entries; i++) {
		ykd_session_endian_convert (&ykd_state->ambiguous_sessions[i]);
	}
}
Example #18
u32  iSeries_Read_Long(void* IoAddress)
{
	setUpMmIo(IoAddress,Read);
	do {
		HvCall3Ret16(HvCallPciBarLoad32,&Return, DsaData.DsaAddr,BarOffset, 0);

		if(Return.rc != 0 ) {
			logPciError("RDL",IoAddress, DevNode, Return.rc);
		}
		else if ( DevNode->IoRetry > 0) {
			pciRetrySuccessful(DevNode);
		}
	} while (Return.rc != 0);
	spin_unlock_irqrestore(&DevNode->IoLock, IrqFlags ); 
	if(Pci_Trace_Flag == 1 ) PCIFR("RDL: IoAddress 0x%p = 0x%08X",IoAddress, swab32((u32)Return.value));
	return swab32((u32)Return.value);
}
Example #19
static void ext2_setup_super (struct super_block * sb,
			      struct ext2_super_block * es)
{
	if (swab32(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
			printk ("EXT2-fs warning: revision level too high, "
				"forcing read/only mode\n");
			sb->s_flags |= MS_RDONLY;
	}
	if (!(sb->s_flags & MS_RDONLY)) {
		if (!(sb->u.ext2_sb.s_mount_state & EXT2_VALID_FS))
			printk ("EXT2-fs warning: mounting unchecked fs, "
				"running e2fsck is recommended\n");
		else if ((sb->u.ext2_sb.s_mount_state & EXT2_ERROR_FS))
			printk ("EXT2-fs warning: mounting fs with errors, "
				"running e2fsck is recommended\n");
		else if ((__s16) swab16(es->s_max_mnt_count) >= 0 &&
		         swab16(es->s_mnt_count) >=
			 (unsigned short) (__s16) swab16(es->s_max_mnt_count))
			printk ("EXT2-fs warning: maximal mount count reached, "
				"running e2fsck is recommended\n");
		else if (swab32(es->s_checkinterval) &&
			(swab32(es->s_lastcheck) + swab32(es->s_checkinterval) <= CURRENT_TIME))
			printk ("EXT2-fs warning: checktime reached, "
				"running e2fsck is recommended\n");
		es->s_state = swab16(swab16(es->s_state) & ~EXT2_VALID_FS);
		if (!(__s16) swab16(es->s_max_mnt_count))
			es->s_max_mnt_count = (__s16) swab16(EXT2_DFL_MAX_MNT_COUNT);
		es->s_mnt_count=swab16(swab16(es->s_mnt_count) + 1);
		es->s_mtime = swab32(CURRENT_TIME);
		mark_buffer_dirty(sb->u.ext2_sb.s_sbh, 1);
		sb->s_dirt = 1;
		if (test_opt (sb, DEBUG))
			printk ("[EXT II FS %s, %s, bs=%lu, fs=%lu, gc=%lu, "
				"bpg=%lu, ipg=%lu, mo=%04lx]\n",
				EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
				sb->u.ext2_sb.s_frag_size,
				sb->u.ext2_sb.s_groups_count,
				EXT2_BLOCKS_PER_GROUP(sb),
				EXT2_INODES_PER_GROUP(sb),
				sb->u.ext2_sb.s_mount_opt);
		if (test_opt (sb, CHECK)) {
			ext2_check_blocks_bitmap (sb);
			ext2_check_inodes_bitmap (sb);
		}
	}
}
Example #20
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	size = data->blksz * data->blocks;
	len = data->sg_len;

	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);

	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	
			int index;
			for (index = 0; index < (amount / 4); index++)
				sgbuffer[index] = swab32(*dmabuf++);
		} else {
			char *tmpv = (char *)dmabuf;
			memcpy(sgbuffer, tmpv, amount);
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		flush_kernel_dcache_page(sg_page(sg));
		kunmap_atomic(sgbuffer);
		data->bytes_xfered += amount;
		if (size == 0)
			break;
	}

	pr_debug("post dma read done\n");
}
Example #21
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
Example #22
u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
	u32 value = readl(dev->base + offset);

	if (dev->swab)
		return swab32(value);
	else
		return value;
}
Example #23
static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
	unsigned int devfn, unsigned int where, u32 *data)
{
	unsigned long cfg_base;
	unsigned long flags;
	u32 temp;

	/* we support slots 0 to 15; dev_fn 0x68 (AD29) is the
	   SoC itself */
	if ((bus->number != 0) || ((devfn & 0xf8) > 0x78)
		|| ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68))
		return 1;

	spin_lock_irqsave(&ebu_lock, flags);

	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn <<
			LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3);

	/* Perform access */
	if (access_type == PCI_ACCESS_WRITE) {
		ltq_w32(swab32(*data), ((u32 *)cfg_base));
	} else {
		*data = ltq_r32(((u32 *)(cfg_base)));
		*data = swab32(*data);
	}
	wmb();

	/* clean possible Master abort */
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
	temp = ltq_r32(((u32 *)(cfg_base)));
	temp = swab32(temp);
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
	ltq_w32(temp, ((u32 *)cfg_base));

	spin_unlock_irqrestore(&ebu_lock, flags);

	if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
		return 1;

	return 0;
}
Example #24
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
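The if/else above (flagged by the "Pity C doesn't have a logical XOR operator" comment) is just a logical XNOR of the two flags: host_swabbed is true exactly when kvmppc_need_byteswap(vcpu) and is_default_endian agree. A tiny stand-alone check of that equivalence, for illustration only:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	for (int need = 0; need <= 1; need++)
		for (int def = 0; def <= 1; def++) {
			bool branchy = need ? (bool)def : !def;  /* the if/else above */
			bool xnor = !((bool)need ^ (bool)def);   /* !(a XOR b) */
			assert(branchy == xnor);
		}
	return 0;
}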
Example #25
static inline int scanhash_sha256d_4way(int thr_id, uint32_t *pdata,
	const uint32_t *ptarget, uint32_t max_nonce,
	struct timeval *tv_start, struct timeval *tv_end,
	unsigned long *hashes_done)
{
	gettimeofday(tv_start, NULL);

	uint32_t data[4 * 64] __attribute__((aligned(128)));
	uint32_t hash[4 * 8] __attribute__((aligned(32)));
	uint32_t midstate[4 * 8] __attribute__((aligned(32)));
	uint32_t prehash[4 * 8] __attribute__((aligned(32)));
	uint32_t n = pdata[19] - 1;
	const uint32_t first_nonce = pdata[19];
	const uint32_t Htarg = ptarget[7];
	int i, j;

	memcpy(data, pdata + 16, 64);
	sha256d_preextend(data);
	for (i = 31; i >= 0; i--)
		for (j = 0; j < 4; j++)
			data[i * 4 + j] = data[i];

	sha256_init(midstate);
	sha256_transform(midstate, pdata, 0);
	memcpy(prehash, midstate, 32);
	sha256d_prehash(prehash, pdata + 16);
	for (i = 7; i >= 0; i--) {
		for (j = 0; j < 4; j++) {
			midstate[i * 4 + j] = midstate[i];
			prehash[i * 4 + j] = prehash[i];
		}
	}

	do {
		for (i = 0; i < 4; i++)
			data[4 * 3 + i] = ++n;

		sha256d_ms_4way(hash, data, midstate, prehash);

		for (i = 0; i < 4; i++) {
			if (swab32(hash[4 * 7 + i]) <= Htarg) {
				pdata[19] = data[4 * 3 + i];
				sha256d_80_swap(hash, pdata);
				if (fulltest(hash, ptarget)) {
					*hashes_done = n - first_nonce + 1;
					gettimeofday(tv_end, NULL);
					return 1;
				}
			}
		}
	} while (n < max_nonce && !scan_abort_flag && !work_restart[thr_id].restart);

	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;
	gettimeofday(tv_end, NULL);
	return 0;
}
Example #26
static inline int scanhash_sha256d_8way(int thr_id, struct work *work,
	uint32_t max_nonce, uint64_t *hashes_done)
{
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;

	uint32_t _ALIGN(128) data[8 * 64];
	uint32_t _ALIGN(32)  hash[8 * 8];
	uint32_t _ALIGN(32)  midstate[8 * 8];
	uint32_t _ALIGN(32)  prehash[8 * 8];
	uint32_t n = pdata[19] - 1;
	const uint32_t first_nonce = pdata[19];
	const uint32_t Htarg = ptarget[7];
	int i, j;
	
	memcpy(data, pdata + 16, 64);
	sha256d_preextend(data);
	for (i = 31; i >= 0; i--)
		for (j = 0; j < 8; j++)
			data[i * 8 + j] = data[i];
	
	sha256_init(midstate);
	sha256_transform(midstate, pdata, 0);
	memcpy(prehash, midstate, 32);
	sha256d_prehash(prehash, pdata + 16);
	for (i = 7; i >= 0; i--) {
		for (j = 0; j < 8; j++) {
			midstate[i * 8 + j] = midstate[i];
			prehash[i * 8 + j] = prehash[i];
		}
	}
	
	do {
		for (i = 0; i < 8; i++)
			data[8 * 3 + i] = ++n;
		
		sha256d_ms_8way(hash, data, midstate, prehash);
		
		for (i = 0; i < 8; i++) {
			if (swab32(hash[8 * 7 + i]) <= Htarg) {
				pdata[19] = data[8 * 3 + i];
				sha256d_80_swap(hash, pdata);
				if (fulltest(hash, ptarget)) {
					*hashes_done = n - first_nonce + 1;
					return 1;
				}
			}
		}
	} while (n < max_nonce && !work_restart[thr_id].restart);
	
	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;
	return 0;
}
Example #27
static inline void pdacf_transfer_mono32sw(u32 *dst, u32 xor, unsigned int size, unsigned long rdp_port)
{
	register u16 val1, val2;

	while (size-- > 0) {
		val1 = inw(rdp_port);
		val2 = inw(rdp_port);
		inw(rdp_port);
		*dst++ = swab32((((val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor);
	}
}
Example #28
// called by every thread, copies the backup to each thread's work.
void hodl_resync_threads( struct work* work )
{
   int nonce_index = algo_gate.nonce_index;
   pthread_barrier_wait( &hodl_barrier );
   if ( memcmp( work->data, hodl_work.data, algo_gate.work_cmp_size ) )
   {
      work_free( work );
      work_copy( work, &hodl_work );
   }
   work->data[ nonce_index ] = swab32( hodl_work.data[ nonce_index ] );
}
Example #29
static void dump_err_buf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	int i;

	mlx4_err(dev, "Internal error detected:\n");
	for (i = 0; i < priv->fw.catas_size; ++i)
		mlx4_err(dev, "  buf[%02x]: %08x\n",
			 i, swab32(readl(priv->catas_err.map + i)));
}
Example #30
unsigned readl(volatile unsigned *addr)
{
	u32 val;
	unsigned long pa = iopa((unsigned long) addr);

	if (!is_pci_mem(pa))
		return in_le32(addr);

	idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0);
	return swab32(val);
}