static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
{
	ibmlana_priv *priv = netdev_priv(dev);
	int retval = 0, tmplen, addr;
	unsigned long flags;
	tda_t tda;
	int baddr;

	/* find out if there are free slots for a frame to transmit. If not,
	   the upper layer is in deep desperation and we simply ignore the frame. */

	if (priv->txusedcnt >= TXBUFCNT) {
		retval = -EIO;
		priv->stat.tx_dropped++;
		goto tx_done;
	}

	/* copy the frame data into the next free transmit buffer - fillup missing */
	tmplen = skb->len;
	if (tmplen < 60)
		tmplen = 60;
	baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
	memcpy_toio(priv->base + baddr, skb->data, skb->len);

	/* copy filler into RAM - in case we're filling up...
	   we're filling a bit more than necessary, but that doesn't harm
	   since the buffer is far larger...
	   Sorry Linus for the filler string but I couldn't resist ;-) */

	if (tmplen > skb->len) {
		char *fill = "NetBSD is a nice OS too! ";
		unsigned int destoffs = skb->len, l = strlen(fill);

		while (destoffs < tmplen) {
			memcpy_toio(priv->base + baddr + destoffs, fill, l);
			destoffs += l;
		}
	}

	/* set up the new frame descriptor */
	addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
	memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
	tda.length = tda.fraglength = tmplen;
	memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));

	spin_lock_irqsave(&priv->lock, flags);

	priv->txusedcnt++;
	priv->txused[priv->nexttxdescr] = 1;

	/* are all transmission slots used up ? */
	if (priv->txusedcnt >= TXBUFCNT)
		netif_stop_queue(dev);

	/* if there were no active descriptors, trigger the SONIC */
	if (priv->txusedcnt == 1)
		StartTx(dev, priv->nexttxdescr);
	priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;

	spin_unlock_irqrestore(&priv->lock, flags);
tx_done:
	dev_kfree_skb(skb);
	return retval;
}
Example #2
static void epxa_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
}
struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
{
	void __iomem *oprom = pci_map_biosrom(pdev);
	struct isci_orom *rom = NULL;
	size_t len, i;
	int j;
	char oem_sig[4];
	struct isci_oem_hdr oem_hdr;
	u8 *tmp, sum;

	if (!oprom)
		return NULL;

	len = pci_biosrom_size(pdev);
	rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
	if (!rom) {
		dev_warn(&pdev->dev,
			 "Unable to allocate memory for orom\n");
		return NULL;
	}

	for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
		memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);

		/* we think we found the OEM table */
		if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
			size_t copy_len;

			memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));

			copy_len = min(oem_hdr.len - sizeof(oem_hdr),
				       sizeof(*rom));

			memcpy_fromio(rom,
				      oprom + i + sizeof(oem_hdr),
				      copy_len);

			/* calculate checksum */
			tmp = (u8 *)&oem_hdr;
			for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
				sum += *tmp;

			tmp = (u8 *)rom;
			for (j = 0; j < sizeof(*rom); j++, tmp++)
				sum += *tmp;

			if (sum != 0) {
				dev_warn(&pdev->dev,
					 "OEM table checksum failed\n");
				continue;
			}

			/* keep going if that's not the oem param table */
			if (memcmp(rom->hdr.signature,
				   ISCI_ROM_SIG,
				   ISCI_ROM_SIG_SIZE) != 0)
				continue;

			dev_info(&pdev->dev,
				 "OEM parameter table found in OROM\n");
			break;
		}
	}

	if (i >= len) {
		dev_err(&pdev->dev, "oprom parse error\n");
		devm_kfree(&pdev->dev, rom);
		rom = NULL;
	}
	pci_unmap_biosrom(oprom);

	return rom;
}
Example #4
void W90N745_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	memcpy_fromio(to, map->map_priv_1 + from, len);
}
Example #5
static void ich2rom_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	memcpy_fromio(to, addr(map, from), len);
}
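
The map-driver callbacks above all share one contract: the MTD layer hands in a flash offset and a kernel buffer, and the driver bounces the bytes out of the ioremap()ed window with memcpy_fromio(). A minimal sketch of that pattern, assuming map->map_priv_1 holds the base returned by ioremap() (the function name is illustrative):

static void example_copy_from(struct map_info *map, void *to,
			      unsigned long from, ssize_t len)
{
	/* map_priv_1 is assumed to hold the __iomem base from ioremap() */
	void __iomem *base = (void __iomem *)map->map_priv_1;

	memcpy_fromio(to, base + from, len);
}
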
static int rk_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = NULL;
	unsigned long flags;
	int ret;

	if (!req->nbytes)
		return zero_message_process(req);

	dev = tctx->dev;
	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->mode = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->nents = sg_nents(req->src);

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->crypto_tasklet);

	/*
	 * It will take some time to process data after the last DMA
	 * transmission.
	 *
	 * The waiting time depends on the length of that last data block,
	 * so a fixed delay cannot be used here. Sleeping 10-50 us per poll
	 * keeps us from hammering the bus while still reacting quickly
	 * once the DMA completes.
	 */
	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
		usleep_range(10, 50);

	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
		      crypto_ahash_digestsize(tfm));

	return 0;
}
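
One weakness worth noting in the function above: the status poll never times out, so a wedged hash engine hangs the caller forever. Assuming CRYPTO_READ() is a plain readl() of dev->reg plus a register offset, a bounded variant could use readl_poll_timeout() from <linux/iopoll.h>; the helper below is a sketch, not part of the driver:

static int rk_ahash_wait_done(struct rk_crypto_info *dev)
{
	u32 sts;

	/* poll roughly every 10 us as before, but give up after 100 ms */
	return readl_poll_timeout(dev->reg + RK_CRYPTO_HASH_STS, sts,
				  sts != 0, 10, 100000);
}
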
static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
		   u32 msg_frame_address )
{
	unsigned long flags;
	u32  headptr;
	u32  size;
	int  err;
#ifndef __BIG_ENDIAN
	unsigned int i;
#endif

	spin_lock_irqsave(&mgr->msg_lock, flags);
	err = 0;

	/* copy message descriptor from miXart to driver */
	size                =  readl_be(MIXART_MEM(mgr, msg_frame_address));       /* size of descriptor + response */
	resp->message_id    =  readl_be(MIXART_MEM(mgr, msg_frame_address + 4));   /* dwMessageID */
	resp->uid.object_id =  readl_be(MIXART_MEM(mgr, msg_frame_address + 8));   /* uidDest */
	resp->uid.desc      =  readl_be(MIXART_MEM(mgr, msg_frame_address + 12));  /* */

	if( (size < MSG_DESCRIPTOR_SIZE) || (resp->size < (size - MSG_DESCRIPTOR_SIZE))) {
		err = -EINVAL;
		snd_printk(KERN_ERR "problem with response size = %u\n", size);
		goto _clean_exit;
	}
	size -= MSG_DESCRIPTOR_SIZE;

	memcpy_fromio(resp->data, MIXART_MEM(mgr, msg_frame_address + MSG_HEADER_SIZE ), size);
	resp->size = size;

	/* swap if necessary */
#ifndef __BIG_ENDIAN
	size /= 4; /* u32 size */
	for(i=0; i < size; i++) {
		((u32*)resp->data)[i] = be32_to_cpu(((u32*)resp->data)[i]);
	}
#endif

	/*
	 * free message frame address
	 */
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

	if( (headptr < MSG_OUTBOUND_FREE_STACK) || ( headptr >= (MSG_OUTBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE))) {
		err = -EINVAL;
		goto _clean_exit;
	}

	/* give address back to outbound fifo */
	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the outbound free head */
	headptr += 4;
	if( headptr >= (MSG_OUTBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE) )
		headptr = MSG_OUTBOUND_FREE_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

 _clean_exit:
	spin_unlock_irqrestore(&mgr->msg_lock, flags);

	return err;
}
Example #8
struct ccat_eth_priv *ccat_eth_init(const struct ccat_device *const ccatdev,
				    const void __iomem * const addr)
{
	struct ccat_eth_priv *priv;
	struct net_device *const netdev = alloc_etherdev(sizeof(*priv));

	if (!netdev)
		return NULL;

	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->ccatdev = ccatdev;

	/* ccat register mappings */
	memcpy_fromio(&priv->info, addr, sizeof(priv->info));
	ccat_eth_priv_init_mappings(priv);

	if (ccat_eth_priv_init_dma(priv)) {
		pr_warn("%s(): DMA initialization failed.\n", __func__);
		free_netdev(netdev);
		return NULL;
	}

	/* init netdev with MAC and stack callbacks */
	memcpy_fromio(netdev->dev_addr, priv->reg.mii + 8, netdev->addr_len);
	netdev->netdev_ops = &ccat_eth_netdev_ops;

	/* use as EtherCAT device? */
	priv->ecdev = ecdev_offer(netdev, ec_poll_rx, THIS_MODULE);
	if (priv->ecdev) {
		priv->carrier_off = ecdev_carrier_off;
		priv->carrier_ok = ecdev_carrier_ok;
		priv->carrier_on = ecdev_carrier_on;
		priv->kfree_skb_any = ecdev_kfree_skb_any;
		priv->start_queue = ecdev_nop;
		priv->stop_queue = ecdev_nop;
		priv->unregister = unregister_ecdev;

		priv->carrier_off(netdev);
		if (ecdev_open(priv->ecdev)) {
			pr_info("unable to register network device.\n");
			ecdev_withdraw(priv->ecdev);
			ccat_eth_priv_free_dma(priv);
			free_netdev(netdev);
			return NULL;
		}
		return priv;
	}

	/* EtherCAT disabled -> prepare normal ethernet mode */
	priv->carrier_off = netif_carrier_off;
	priv->carrier_ok = netif_carrier_ok;
	priv->carrier_on = netif_carrier_on;
	priv->kfree_skb_any = dev_kfree_skb_any;
	priv->start_queue = netif_start_queue;
	priv->stop_queue = netif_stop_queue;
	priv->unregister = unregister_netdev;

	priv->carrier_off(netdev);
	if (register_netdev(netdev)) {
		pr_info("unable to register network device.\n");
		ccat_eth_priv_free_dma(priv);
		free_netdev(netdev);
		return NULL;
	}
	pr_info("registered %s as network device.\n", netdev->name);
	return priv;
}
Example #9
void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
{
	void __iomem *ctl = priv->ctl;
	memcpy_fromio(power, ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
		      sizeof(*power));
}
static int ccat_eth_init_netdev(struct ccat_eth_priv *priv)
{
	int status;

	/* init netdev with MAC and stack callbacks */
	memcpy_fromio(priv->netdev->dev_addr, priv->reg.mii + 8,
		      priv->netdev->addr_len);
	priv->netdev->netdev_ops = &ccat_eth_netdev_ops;

	/* use as EtherCAT device? */
	priv->carrier_off = ecdev_carrier_off;
	priv->carrier_ok = ecdev_carrier_ok;
	priv->carrier_on = ecdev_carrier_on;
	priv->kfree_skb_any = ecdev_kfree_skb_any;

	/* It would be more intuitive to check for:
	 * if (priv->func->drv->type == CCATINFO_ETHERCAT_MASTER_DMA) {
	 * unfortunately priv->func->drv is not initialized until probe() returns.
	 * So we check if there is a rx dma fifo registered to determine dma/io mode */
	if (&dma_rx_fifo_ops == priv->rx_fifo.ops) {
		priv->receive = ecdev_receive_dma;
	} else {
		priv->receive = ecdev_receive_eim;
	}
	priv->start_queue = ecdev_nop;
	priv->stop_queue = ecdev_nop;
	priv->unregister = unregister_ecdev;
	priv->ecdev = ecdev_offer(priv->netdev, ec_poll, THIS_MODULE);
	if (priv->ecdev) {
		priv->carrier_off(priv->netdev);
		if (ecdev_open(priv->ecdev)) {
			pr_info("unable to register network device.\n");
			ecdev_withdraw(priv->ecdev);
			ccat_eth_priv_free(priv);
			free_netdev(priv->netdev);
			return -ENODEV;	/* TODO: propagate a more specific error code */
		}
		priv->func->private_data = priv;
		return 0;
	}

	/* EtherCAT disabled -> prepare normal ethernet mode */
	priv->carrier_off = netif_carrier_off;
	priv->carrier_ok = netif_carrier_ok;
	priv->carrier_on = netif_carrier_on;
	priv->kfree_skb_any = dev_kfree_skb_any;
	priv->receive = ccat_eth_receive;
	priv->start_queue = netif_start_queue;
	priv->stop_queue = netif_stop_queue;
	priv->unregister = unregister_netdev;
	priv->carrier_off(priv->netdev);

	status = register_netdev(priv->netdev);
	if (status) {
		pr_info("unable to register network device.\n");
		ccat_eth_priv_free(priv);
		free_netdev(priv->netdev);
		return status;
	}
	pr_info("registered %s as network device.\n", priv->netdev->name);
	priv->func->private_data = priv;
	return 0;
}
Example #11
static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
{
	struct map_pci_info *map = (struct map_pci_info *)_map;
	memcpy_fromio(to, map->base + map->translate(map, from), len);
}
Example #12
static int crb_acpi_add(struct acpi_device *device)
{
	struct tpm_chip *chip;
	struct acpi_tpm2 *buf;
	struct crb_priv *priv;
	struct device *dev = &device->dev;
	acpi_status status;
	u32 sm;
	u64 pa;
	int rc;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->flags = TPM_CHIP_FLAG_TPM2;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to get TPM2 ACPI table\n");
		return -ENODEV;
	}

	if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
		dev_err(dev, "TPM2 ACPI table has wrong size");
		return -EINVAL;
	}

	priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
						GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to devm_kzalloc for private data\n");
		return -ENOMEM;
	}

	sm = le32_to_cpu(buf->start_method);

	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
	 * report only ACPI start but in practice seems to require both
	 * ACPI start and CRB start.
	 */
	if (sm == CRB_SM_CRB || sm == CRB_SM_CRB_WITH_ACPI_START ||
	    !strcmp(acpi_device_hid(device), "MSFT0101"))
		priv->flags |= CRB_FL_CRB_START;

	if (sm == CRB_SM_ACPI_START || sm == CRB_SM_CRB_WITH_ACPI_START)
		priv->flags |= CRB_FL_ACPI_START;

	priv->cca = (struct crb_control_area __iomem *)
		devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
	if (!priv->cca) {
		dev_err(dev, "ioremap of the control area failed\n");
		return -ENOMEM;
	}

	memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
	pa = le64_to_cpu(pa);
	priv->cmd = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->cmd_size));
	if (!priv->cmd) {
		dev_err(dev, "ioremap of the command buffer failed\n");
		return -ENOMEM;
	}

	memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
	pa = le64_to_cpu(pa);
	priv->rsp = devm_ioremap_nocache(dev, pa,
					 ioread32(&priv->cca->rsp_size));
	if (!priv->rsp) {
		dev_err(dev, "ioremap of the response buffer failed\n");
		return -ENOMEM;
	}

	chip->vendor.priv = priv;

	/* Default timeouts and durations */
	chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
	chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
	chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
	chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
	chip->vendor.duration[TPM_SHORT] =
		msecs_to_jiffies(TPM2_DURATION_SHORT);
	chip->vendor.duration[TPM_MEDIUM] =
		msecs_to_jiffies(TPM2_DURATION_MEDIUM);
	chip->vendor.duration[TPM_LONG] =
		msecs_to_jiffies(TPM2_DURATION_LONG);

	chip->acpi_dev_handle = device->handle;

	rc = tpm2_do_selftest(chip);
	if (rc)
		return rc;

	return tpm_chip_register(chip);
}
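
The 8-byte memcpy_fromio() calls above stand in for a 64-bit MMIO read: there is no portable ioread64(), so the little-endian cmd_pa/rsp_pa registers are pulled out as raw bytes and swapped on the CPU side. The idiom, reduced to a sketch (the helper name is illustrative):

static u64 crb_read_le64(void __iomem *reg)
{
	__le64 raw;

	/* fetch the register as bytes, then byte-swap once on the CPU */
	memcpy_fromio(&raw, reg, sizeof(raw));
	return le64_to_cpu(raw);
}
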
Example #13
/*
 * sst_device_control - Set Control params
 *
 * @cmd: control cmd to be set
 * @arg: command argument
 *
 * This function is called by the MID sound card driver to set
 * SST/sound card controls for an opened stream.
 * It is registered with the MID driver.
 */
int sst_device_control(int cmd, void *arg)
{
	int retval = 0, str_id = 0;

	switch (cmd) {
	case SST_SND_PAUSE:
	case SST_SND_RESUME:
	case SST_SND_DROP:
	case SST_SND_START:
		sst_drv_ctx->mad_ops.control_op = cmd;
		sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
		queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
		break;

	case SST_SND_STREAM_INIT: {
		struct pcm_stream_info *str_info;
		struct stream_info *stream;

		pr_debug("stream init called\n");
		str_info = (struct pcm_stream_info *)arg;
		str_id = str_info->str_id;
		retval = sst_validate_strid(str_id);
		if (retval)
			break;

		stream = &sst_drv_ctx->streams[str_id];
		pr_debug("setting the period ptrs\n");
		stream->pcm_substream = str_info->mad_substream;
		stream->period_elapsed = str_info->period_elapsed;
		stream->sfreq = str_info->sfreq;
		stream->prev = stream->status;
		stream->status = STREAM_INIT;
		break;
	}

	case SST_SND_BUFFER_POINTER: {
		struct pcm_stream_info *stream_info;
		struct snd_sst_tstamp fw_tstamp = {0,};
		struct stream_info *stream;


		stream_info = (struct pcm_stream_info *)arg;
		str_id = stream_info->str_id;
		retval = sst_validate_strid(str_id);
		if (retval)
			break;
		stream = &sst_drv_ctx->streams[str_id];

		if (!stream->pcm_substream)
			break;
		memcpy_fromio(&fw_tstamp,
			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
			+(str_id * sizeof(fw_tstamp))),
			sizeof(fw_tstamp));

		pr_debug("Pointer Query on strid = %d ops %d\n",
						str_id, stream->ops);

		if (stream->ops == STREAM_OPS_PLAYBACK)
			stream_info->buffer_ptr = fw_tstamp.samples_rendered;
		else
			stream_info->buffer_ptr = fw_tstamp.samples_processed;
		pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
			fw_tstamp.samples_rendered, stream_info->buffer_ptr);
		break;
	}
	case SST_ENABLE_RX_TIME_SLOT: {
		int status = *(int *)arg;
		sst_drv_ctx->rx_time_slot_status = status;
		sst_enable_rx_timeslot(status);
		break;
	}
	default:
		/* Illegal case */
		pr_warn("illegal req\n");
		return -EINVAL;
	}

	return retval;
}
Example #14
void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
{
	void __iomem *address = lx_dsp_register(chip, port);
	memcpy_fromio(data, address, len*sizeof(u32));
}
Example #15
int softing_load_fw(const char *file, struct softing *card,
		__iomem uint8_t *dpram, unsigned int size, int offset)
{
	const struct firmware *fw;
	int ret;
	const uint8_t *mem, *end, *dat;
	uint16_t type, len;
	uint32_t addr;
	uint8_t *buf = NULL;
	int buflen = 0;
	int8_t type_end = 0;

	ret = request_firmware(&fw, file, &card->pdev->dev);
	if (ret < 0)
		return ret;
	dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
		", offset %c0x%04x\n",
		card->pdat->name, file, (unsigned int)fw->size,
		(offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
	/* parse the firmware */
	mem = fw->data;
	end = &mem[fw->size];
	/* look for header record */
	ret = fw_parse(&mem, &type, &addr, &len, &dat);
	if (ret < 0)
		goto failed;
	if (type != 0xffff)
		goto failed;
	if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
		ret = -EINVAL;
		goto failed;
	}
	/* ok, we had a header */
	while (mem < end) {
		ret = fw_parse(&mem, &type, &addr, &len, &dat);
		if (ret < 0)
			goto failed;
		if (type == 3) {
			/* start address, not used here */
			continue;
		} else if (type == 1) {
			/* eof */
			type_end = 1;
			break;
		} else if (type != 0) {
			ret = -EINVAL;
			goto failed;
		}

		if ((addr + len + offset) > size)
			goto failed;
		memcpy_toio(&dpram[addr + offset], dat, len);
		/* be sure to flush caches from IO space */
		mb();
		if (len > buflen) {
			/* align buflen */
			buflen = (len + (1024-1)) & ~(1024-1);
			buf = krealloc(buf, buflen, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto failed;
			}
		}
		/* verify record data */
		memcpy_fromio(buf, &dpram[addr + offset], len);
		if (memcmp(buf, dat, len)) {
			/* is not ok */
			dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
			ret = -EIO;
			goto failed;
		}
	}
	if (!type_end)
		/* no end record seen */
		goto failed;
	ret = 0;
failed:
	kfree(buf);
	release_firmware(fw);
	if (ret < 0)
		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
	return ret;
}
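
The loop above is the canonical write-then-verify treatment of dual-ported RAM: memcpy_toio() pushes a record, a barrier flushes it, and memcpy_fromio() reads it back for comparison against the source. Boiled down to a sketch (names are illustrative; scratch must hold at least len bytes):

static int dpram_write_verify(uint8_t __iomem *dpram, unsigned int off,
			      const uint8_t *dat, uint16_t len,
			      uint8_t *scratch)
{
	memcpy_toio(&dpram[off], dat, len);
	mb();	/* be sure the writes reach the device before reading back */
	memcpy_fromio(scratch, &dpram[off], len);
	return memcmp(scratch, dat, len) ? -EIO : 0;
}
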
static void amd766rom_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	memcpy_fromio(to, map->map_priv_1 + from, len);
}
static void
el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
    int boguscount = 0;
    void __iomem *base = ei_status.mem;
    unsigned short int *buf;
    unsigned short word;

    /* Maybe enable shared memory just to be safe... nahh.*/
    if (base) {	/* Use the shared memory. */
	ring_offset -= (EL2_MB1_START_PG<<8);
	if (ring_offset + count > EL2_MEMSIZE) {
	    /* We must wrap the input move. */
	    int semi_count = EL2_MEMSIZE - ring_offset;
	    memcpy_fromio(skb->data, base + ring_offset, semi_count);
	    count -= semi_count;
	    memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
	} else {
		memcpy_fromio(skb->data, base + ring_offset, count);
	}
	return;
    }

/*
 *  No shared memory, use programmed I/O.
 */
    word = (unsigned short) ring_offset;
    outb(word>>8, E33G_DMAAH);
    outb(word&0xFF, E33G_DMAAL);

    outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
	   | ECNTRL_START, E33G_CNTRL);

/*
 *  Here I also try to get data as fast as possible. I am betting that I
 *  can read one extra byte without clobbering anything in the kernel because
 *  this would only occur on an odd byte-count and allocation of skb->data
 *  is word-aligned. Variable 'count' is NOT checked. Caller must check
 *  for a valid count.
 *  [This is currently quite safe.... but if one day the 3c503 explodes
 *   you know where to come looking ;)]
 */

    buf =  (unsigned short int *) skb->data;
    count =  (count + 1) >> 1;
    for(;;)
    {
        boguscount = 0x1000;
        while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
        {
            if(!boguscount--)
            {
                pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name);
                el2_reset_8390(dev);
                goto blocked;
            }
        }
        if(count > WRD_COUNT)
        {
            insw(E33G_FIFOH, buf, WRD_COUNT);
            buf   += WRD_COUNT;
            count -= WRD_COUNT;
        }
        else
        {
            insw(E33G_FIFOH, buf, count);
            break;
        }
    }
    blocked:;
    outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
}
Example #18
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                                     unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name)
{
    struct _vop_vdev *vdev = to_vopvdev(dev);
    struct vop_device *vpdev = vdev->vpdev;
    struct mic_vqconfig __iomem *vqconfig;
    struct mic_vqconfig config;
    struct virtqueue *vq;
    void __iomem *va;
    struct _mic_vring_info __iomem *info;
    void *used;
    int vr_size, _vr_size, err, magic;
    struct vring *vr;
    u8 type = ioread8(&vdev->desc->type);

    if (index >= ioread8(&vdev->desc->num_vq))
        return ERR_PTR(-ENOENT);

    if (!name)
        return ERR_PTR(-ENOENT);

    /* First assign the vring's allocated in host memory */
    vqconfig = _vop_vq_config(vdev->desc) + index;
    memcpy_fromio(&config, vqconfig, sizeof(config));
    _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
    vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
    va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
                                vr_size);
    if (!va)
        return ERR_PTR(-ENOMEM);
    vdev->vr[index] = va;
    memset_io(va, 0x0, _vr_size);
    vq = vring_new_virtqueue(
             index,
             le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
             dev,
             false,
             (void __force *)va, vop_notify, callback, name);
    if (!vq) {
        err = -ENOMEM;
        goto unmap;
    }
    info = va + _vr_size;
    magic = ioread32(&info->magic);

    if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
        err = -EIO;
        goto unmap;
    }

    /* Allocate and reassign used ring now */
    vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
                                        sizeof(struct vring_used_elem) *
                                        le16_to_cpu(config.num));
    used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                    get_order(vdev->used_size[index]));
    if (!used) {
        err = -ENOMEM;
        dev_err(_vop_dev(vdev), "%s %d err %d\n",
                __func__, __LINE__, err);
        goto del_vq;
    }
    vdev->used[index] = dma_map_single(&vpdev->dev, used,
                                       vdev->used_size[index],
                                       DMA_BIDIRECTIONAL);
    if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
        err = -ENOMEM;
        dev_err(_vop_dev(vdev), "%s %d err %d\n",
                __func__, __LINE__, err);
        goto free_used;
    }
    writeq(vdev->used[index], &vqconfig->used_address);
    /*
     * To reassign the used ring here we are directly accessing
     * struct vring_virtqueue which is a private data structure
     * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
     * vring_new_virtqueue() would ensure that
     *  (&vq->vring == (struct vring *) (&vq->vq + 1));
     */
    vr = (struct vring *)(vq + 1);
    vr->used = used;

    vq->priv = vdev;
    return vq;
free_used:
    free_pages((unsigned long)used,
               get_order(vdev->used_size[index]));
del_vq:
    vring_del_virtqueue(vq);
unmap:
    vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
    return ERR_PTR(err);
}
Example #19
static void
ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
	void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8);
	memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
}
Example #20
void bcm9XXXX_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	/* tht on the 7038 this cause a databus error, fixed in B0 rev */
	memcpy_fromio(to, map->map_priv_1 + from, len);
}
Example #21
/**
 * sst_process_reply - process a reply message from SST
 *
 * @work:	Pointer to work structure
 *
 * This function is scheduled by the ISR. It takes a reply message
 * from the response queue and acts on it.
 */
void sst_process_reply(struct work_struct *work)
{
	struct sst_ipc_msg_wq *msg =
			container_of(work, struct sst_ipc_msg_wq, wq);

	int str_id = msg->header.part.str_id;
	struct stream_info *str_info;

	switch (msg->header.part.msg_id) {
	case IPC_IA_TARGET_DEV_SELECT:
		if (!msg->header.part.data) {
			sst_drv_ctx->tgt_dev_blk.ret_code = 0;
		} else {
			pr_err(" Msg %x reply error %x\n",
			msg->header.part.msg_id, msg->header.part.data);
			sst_drv_ctx->tgt_dev_blk.ret_code =
					-msg->header.part.data;
		}

		if (sst_drv_ctx->tgt_dev_blk.on == true) {
				sst_drv_ctx->tgt_dev_blk.condition = true;
				wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_ALG_PARAMS: {
		pr_debug("sst:IPC_ALG_PARAMS response %x\n", msg->header.full);
		pr_debug("sst: data value %x\n", msg->header.part.data);
		pr_debug("sst: large value %x\n", msg->header.part.large);

		if (!msg->header.part.large) {
			if (!msg->header.part.data) {
				pr_debug("sst: alg set success\n");
				sst_drv_ctx->ppp_params_blk.ret_code = 0;
			} else {
				pr_debug("sst: alg set failed\n");
				sst_drv_ctx->ppp_params_blk.ret_code =
							-msg->header.part.data;
			}

		} else if (msg->header.part.data) {
			struct snd_ppp_params *mailbox_params, *get_params;
			char *params;

			pr_debug("sst: alg get success\n");
			mailbox_params = (struct snd_ppp_params *)msg->mailbox;
			get_params = kzalloc(sizeof(*get_params), GFP_KERNEL);
			if (get_params == NULL) {
				pr_err("sst: out of memory for ALG PARAMS");
				break;
			}
			memcpy_fromio(get_params, mailbox_params,
							sizeof(*get_params));
			get_params->params = kzalloc(mailbox_params->size,
							GFP_KERNEL);
			if (get_params->params == NULL) {
				kfree(get_params);
				pr_err("sst: out of memory for ALG PARAMS block");
				break;
			}
			params = msg->mailbox;
			params = params + sizeof(*mailbox_params) - sizeof(u32);
			memcpy_fromio(get_params->params, params,
							get_params->size);
			sst_drv_ctx->ppp_params_blk.ret_code = 0;
			sst_drv_ctx->ppp_params_blk.data = get_params;
		}

		if (sst_drv_ctx->ppp_params_blk.on == true) {
			sst_drv_ctx->ppp_params_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	}

	case IPC_IA_TUNING_PARAMS: {
		pr_debug("sst:IPC_TUNING_PARAMS resp: %x\n", msg->header.full);
		pr_debug("data value %x\n", msg->header.part.data);
		if (msg->header.part.large) {
			pr_debug("alg set failed\n");
			sst_drv_ctx->ppp_params_blk.ret_code =
							-msg->header.part.data;
		} else {
			pr_debug("alg set success\n");
			sst_drv_ctx->ppp_params_blk.ret_code = 0;
		}
		if (sst_drv_ctx->ppp_params_blk.on == true) {
			sst_drv_ctx->ppp_params_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	}

	case IPC_IA_GET_FW_INFO: {
		struct snd_sst_fw_info *fw_info =
			(struct snd_sst_fw_info *)msg->mailbox;
		if (msg->header.part.large) {
			int major = fw_info->fw_version.major;
			int minor = fw_info->fw_version.minor;
			int build = fw_info->fw_version.build;
			pr_debug("Msg succeeded %x\n",
				       msg->header.part.msg_id);
			pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
					major, minor, build);
			memcpy_fromio(sst_drv_ctx->fw_info_blk.data,
				((struct snd_sst_fw_info *)(msg->mailbox)),
				sizeof(struct snd_sst_fw_info));
			sst_drv_ctx->fw_info_blk.ret_code = 0;
		} else {
			pr_err(" Msg %x reply error %x\n",
			msg->header.part.msg_id, msg->header.part.data);
			sst_drv_ctx->fw_info_blk.ret_code =
					-msg->header.part.data;
		}
		if (sst_drv_ctx->fw_info_blk.on == true) {
			pr_debug("Memcopy succeeded\n");
			sst_drv_ctx->fw_info_blk.on = false;
			sst_drv_ctx->fw_info_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	}
	case IPC_IA_SET_STREAM_MUTE:
		if (!msg->header.part.data) {
			pr_debug("Msg succeeded %x\n",
				       msg->header.part.msg_id);
			sst_drv_ctx->mute_info_blk.ret_code = 0;
		} else {
			pr_err(" Msg %x reply error %x\n",
			msg->header.part.msg_id, msg->header.part.data);
			sst_drv_ctx->mute_info_blk.ret_code =
					-msg->header.part.data;

		}
		if (sst_drv_ctx->mute_info_blk.on == true) {
			sst_drv_ctx->mute_info_blk.on = false;
			sst_drv_ctx->mute_info_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_SET_STREAM_VOL:
		if (!msg->header.part.data) {
			pr_debug("Msg succeeded %x\n",
				       msg->header.part.msg_id);
			sst_drv_ctx->vol_info_blk.ret_code = 0;
		} else {
			pr_err(" Msg %x reply error %x\n",
					msg->header.part.msg_id,
			msg->header.part.data);
			sst_drv_ctx->vol_info_blk.ret_code =
					-msg->header.part.data;

		}

		if (sst_drv_ctx->vol_info_blk.on == true) {
			sst_drv_ctx->vol_info_blk.on = false;
			sst_drv_ctx->vol_info_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_GET_STREAM_VOL:
		if (msg->header.part.large) {
			pr_debug("Large Msg Received Successfully\n");
			pr_debug("Msg succeeded %x\n",
				       msg->header.part.msg_id);
			memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
				(void *) msg->mailbox,
				sizeof(struct snd_sst_vol));
			sst_drv_ctx->vol_info_blk.ret_code = 0;
		} else {
			pr_err("Msg %x reply error %x\n",
			msg->header.part.msg_id, msg->header.part.data);
			sst_drv_ctx->vol_info_blk.ret_code =
					-msg->header.part.data;
		}
		if (sst_drv_ctx->vol_info_blk.on == true) {
			sst_drv_ctx->vol_info_blk.on = false;
			sst_drv_ctx->vol_info_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;

	case IPC_IA_GET_STREAM_PARAMS:
		if (sst_validate_strid(str_id)) {
			pr_err("stream id %d invalid\n", str_id);
			break;
		}
		str_info = &sst_drv_ctx->streams[str_id];
		if (msg->header.part.large) {
			pr_debug("Get stream large success\n");
			memcpy_fromio(str_info->ctrl_blk.data,
				((void *)(msg->mailbox)),
				sizeof(struct snd_sst_fw_get_stream_params));
			str_info->ctrl_blk.ret_code = 0;
		} else {
			pr_err("Msg %x reply error %x\n",
				msg->header.part.msg_id, msg->header.part.data);
			str_info->ctrl_blk.ret_code = -msg->header.part.data;
		}
		if (str_info->ctrl_blk.on == true) {
			str_info->ctrl_blk.on = false;
			str_info->ctrl_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_DECODE_FRAMES:
		if (sst_validate_strid(str_id)) {
			pr_err("stream id %d invalid\n", str_id);
			break;
		}
		str_info = &sst_drv_ctx->streams[str_id];
		if (msg->header.part.large) {
			pr_debug("Msg succeeded %x\n",
				       msg->header.part.msg_id);
			memcpy_fromio(str_info->data_blk.data,
					((void *)(msg->mailbox)),
					sizeof(struct snd_sst_decode_info));
			str_info->data_blk.ret_code = 0;
		} else {
			pr_err("Msg %x reply error %x\n",
				msg->header.part.msg_id, msg->header.part.data);
			str_info->data_blk.ret_code = -msg->header.part.data;
		}
		if (str_info->data_blk.on == true) {
			str_info->data_blk.on = false;
			str_info->data_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_DRAIN_STREAM:
		if (sst_validate_strid(str_id)) {
			pr_err("stream id %d invalid\n", str_id);
			break;
		}
		str_info = &sst_drv_ctx->streams[str_id];
		if (!msg->header.part.data) {
			pr_debug("Msg succeeded %x\n",
					msg->header.part.msg_id);
			str_info->ctrl_blk.ret_code = 0;

		} else {
			pr_err(" Msg %x reply error %x\n",
				msg->header.part.msg_id, msg->header.part.data);
			str_info->ctrl_blk.ret_code = -msg->header.part.data;

		}
		str_info = &sst_drv_ctx->streams[str_id];
		if (str_info->data_blk.on == true) {
			str_info->data_blk.on = false;
			str_info->data_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;

	case IPC_IA_DROP_STREAM:
		if (sst_validate_strid(str_id)) {
			pr_err("str id %d invalid\n", str_id);
			break;
		}
		str_info = &sst_drv_ctx->streams[str_id];
		if (msg->header.part.large) {
			struct snd_sst_drop_response *drop_resp =
				(struct snd_sst_drop_response *)msg->mailbox;

			pr_debug("Drop ret bytes %x\n", drop_resp->bytes);

			str_info->curr_bytes = drop_resp->bytes;
			str_info->ctrl_blk.ret_code =  0;
		} else {
			pr_err(" Msg %x reply error %x\n",
				msg->header.part.msg_id, msg->header.part.data);
			str_info->ctrl_blk.ret_code = -msg->header.part.data;
		}
		if (str_info->ctrl_blk.on == true) {
			str_info->ctrl_blk.on = false;
			str_info->ctrl_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_ENABLE_RX_TIME_SLOT:
		if (!msg->header.part.data) {
			pr_debug("RX_TIME_SLOT success\n");
			sst_drv_ctx->hs_info_blk.ret_code = 0;
		} else {
			pr_err(" Msg %x reply error %x\n",
				msg->header.part.msg_id,
				msg->header.part.data);
			sst_drv_ctx->hs_info_blk.ret_code =
				-msg->header.part.data;
		}
		if (sst_drv_ctx->hs_info_blk.on == true) {
			sst_drv_ctx->hs_info_blk.on = false;
			sst_drv_ctx->hs_info_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_PAUSE_STREAM:
	case IPC_IA_RESUME_STREAM:
	case IPC_IA_SET_STREAM_PARAMS:
		str_info = &sst_drv_ctx->streams[str_id];
		if (!msg->header.part.data) {
			pr_debug("Msg succeeded %x\n",
					msg->header.part.msg_id);
			str_info->ctrl_blk.ret_code = 0;
		} else {
			pr_err(" Msg %x reply error %x\n",
					msg->header.part.msg_id,
					msg->header.part.data);
			str_info->ctrl_blk.ret_code = -msg->header.part.data;
		}
		if (sst_validate_strid(str_id)) {
			pr_err(" stream id %d invalid\n", str_id);
			break;
		}

		if (str_info->ctrl_blk.on == true) {
			str_info->ctrl_blk.on = false;
			str_info->ctrl_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;

	case IPC_IA_FREE_STREAM:
		str_info = &sst_drv_ctx->streams[str_id];
		if (!msg->header.part.data) {
			pr_debug("Stream %d freed\n", str_id);
		} else {
			pr_err("Free for %d ret error %x\n",
				       str_id, msg->header.part.data);
		}
		if (str_info->ctrl_blk.on == true) {
			str_info->ctrl_blk.on = false;
			str_info->ctrl_blk.condition = true;
			wake_up(&sst_drv_ctx->wait_queue);
		}
		break;
	case IPC_IA_ALLOC_STREAM: {
		/* map to stream, call play */
		struct snd_sst_alloc_response *resp =
				(struct snd_sst_alloc_response *)msg->mailbox;
		if (resp->str_type.result)
			pr_err("error alloc stream = %x\n",
				       resp->str_type.result);
		sst_alloc_stream_response(str_id, resp);
		break;
	}

	case IPC_IA_PLAY_FRAMES:
	case IPC_IA_CAPT_FRAMES:
		if (sst_validate_strid(str_id)) {
			pr_err("stream id %d invalid\n", str_id);
			break;
		}
		pr_debug("Ack for play/capt frames received\n");
		break;

	case IPC_IA_PREP_LIB_DNLD: {
		struct snd_sst_str_type *str_type =
			(struct snd_sst_str_type *)msg->mailbox;
		pr_debug("Prep Lib download %x\n",
				msg->header.part.msg_id);
		if (str_type->result)
			pr_err("Prep lib download %x\n", str_type->result);
		else
			pr_debug("Can download codec now...\n");
		sst_wake_up_alloc_block(sst_drv_ctx, str_id,
				str_type->result, NULL);
		break;
	}

	case IPC_IA_LIB_DNLD_CMPLT: {
		struct snd_sst_lib_download_info *resp =
			(struct snd_sst_lib_download_info *)msg->mailbox;
		int retval = resp->result;

		pr_debug("Lib downloaded %x\n", msg->header.part.msg_id);
		if (resp->result) {
			pr_err("err in lib dload %x\n", resp->result);
		} else {
			pr_debug("Codec download complete...\n");
			pr_debug("codec Type %d Ver %d Built %s: %s\n",
				resp->dload_lib.lib_info.lib_type,
				resp->dload_lib.lib_info.lib_version,
				resp->dload_lib.lib_info.b_date,
				resp->dload_lib.lib_info.b_time);
		}
		sst_wake_up_alloc_block(sst_drv_ctx, str_id,
						retval, NULL);
		break;
	}

	case IPC_IA_GET_FW_VERSION: {
		struct ipc_header_fw_init *version =
				(struct ipc_header_fw_init *)msg->mailbox;
		int major = version->fw_version.major;
		int minor = version->fw_version.minor;
		int build = version->fw_version.build;
		dev_info(&sst_drv_ctx->pci->dev,
			"INFO: ***LOADED SST FW VERSION*** = %02d.%02d.%02d\n",
		major, minor, build);
		break;
	}
	case IPC_IA_GET_FW_BUILD_INF: {
		struct sst_fw_build_info *build =
			(struct sst_fw_build_info *)msg->mailbox;
		pr_debug("Build date:%sTime:%s", build->date, build->time);
		break;
	}
	case IPC_IA_SET_PMIC_TYPE:
		break;
	case IPC_IA_START_STREAM:
		pr_debug("reply for START STREAM %x\n", msg->header.full);
		break;

	case IPC_IA_GET_FW_CTXT:
		pr_debug("reply for get fw ctxt  %x\n", msg->header.full);
		if (msg->header.part.data)
			sst_drv_ctx->fw_cntx_size = 0;
		else
			sst_drv_ctx->fw_cntx_size = *sst_drv_ctx->fw_cntx;
		pr_debug("fw copied data %x\n", sst_drv_ctx->fw_cntx_size);
		sst_wake_up_alloc_block(
			sst_drv_ctx, str_id, msg->header.part.data, NULL);
		break;
	default:
		/* Illegal case */
		pr_err("process reply:default = %x\n", msg->header.full);
	}
	sst_clear_interrupt();
	return;
}
Example #22
ssize_t silly_read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
{
    int retval;
    int mode = MINOR(INODE_FROM_F(filp)->i_rdev);
    void *add;
    unsigned long isa_addr = ISA_BASE + *f_pos;
    unsigned char *kbuf, *ptr;


    if (mode == M_joke) return 0;  /* no read on /dev/silliest */

    if (mode == M_vga) {
        isa_addr = VGA_BASE + *f_pos; /* range: 0xB8000-0xC0000 */
        if (isa_addr + count > VIDEO_MAX)
            count = VIDEO_MAX - isa_addr;
        mode = M_32; /* and fall back to normal xfer */
    }
    else 
        if (isa_addr + count > ISA_MAX) /* range: 0xA0000-0x100000 */
            count = ISA_MAX - isa_addr;

    /*
     * too big an f_pos (caused by a malicious lseek())
     * would result in a negative count; count is a size_t,
     * so the sign must be checked on the signed value
     */
    if ((ssize_t)count < 0) return 0;

    kbuf = kmalloc(count, GFP_KERNEL);
    if (!kbuf) return -ENOMEM;
    ptr=kbuf;
    retval=count;
   /*
    * Convert our address into our remapped area.
    */
    add = io_base + (isa_addr - ISA_BASE);
    /*
     * kbuf is aligned, but the reads might not. In order not to
     * drive me mad with unaligned leading and trailing bytes,
     * I downgrade the `mode' if unaligned xfers are requested.
     */

    if (mode==M_32 && ((isa_addr | count) & 3))
        mode = M_16;
    if (mode==M_16 && ((isa_addr | count) & 1))
        mode = M_8;

    switch(mode) {
      case M_32: 
        while (count >= 4) {
            *(u32 *)ptr = readl(add);
            add+=4; count-=4; ptr+=4;
        }
        break;
            
      case M_16: 
        while (count >= 2) {
            *(u16 *)ptr = readw(add);
            add+=2; count-=2; ptr+=2;
        }
        break;
            
      case M_8: 
        while (count) {
            *ptr = readb(add);
            add++; count--; ptr++;
        }
        break;

      case M_memcpy:
        memcpy_fromio(ptr, add, count);
        break;

      default:
        kfree(kbuf);		/* don't leak the bounce buffer */
        return -EINVAL;
    }
    if (retval > 0 && copy_to_user(buf, kbuf, retval)) {
        kfree(kbuf);
        return -EFAULT;
    }
    kfree(kbuf);
    *f_pos += retval;
    return retval;
}
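
The mode juggling above reflects a real property of memcpy_fromio(): it guarantees the bytes arrive, not the width of the bus cycles used to fetch them, so hardware that is sensitive to access size needs explicit readl()/readw()/readb() loops like the M_32/M_16/M_8 cases. A width-safe 32-bit copy, reduced to a sketch (aligned source and a count that is a multiple of 4 are assumed):

static void copy_fromio_32(u32 *dst, const void __iomem *src, size_t count)
{
	size_t i;

	/* one readl() per word pins every bus access at 32 bits */
	for (i = 0; i < count / 4; i++)
		dst[i] = readl(src + i * 4);
}
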
Example #23
int mcc_initialize_shared_mem(void)
{
	int i,j;
	int return_value = MCC_SUCCESS;
	char init_string[INIT_STRING_LEN];
	unsigned char *bkdata;

	// critical region for shared memory begins
	if(mcc_sema4_grab())
	{
		mcc_deinitialize_shared_mem();
		return -EBUSY;
	}

	memcpy_fromio(init_string, bookeeping_data->init_string, INIT_STRING_LEN);

	/* Initialize the bookeeping structure */
	if(memcmp(init_string, MCC_INIT_STRING, INIT_STRING_LEN))
	{
		printk(KERN_DEBUG "at entry, bookeeping_data not initialized\n");

		// zero it all - no guarantee Linux or U-Boot didn't touch it before it was reserved
		bkdata = (unsigned char *)bookeeping_data;
		for(i=0; i<sizeof(struct mcc_bookeeping_struct); i++)
			bkdata[i] = 0;
		//memset(bookeeping_data, 0, sizeof(struct mcc_bookeeping_struct));

		// Now initialize all the non-zero items
		
		/* Set init_flag in case it has not been set yet by another core */
		//memcpy(bookeeping_data->init_string, MCC_INIT_STRING, INIT_STRING_LEN);
		memcpy_toio(bookeeping_data->init_string, MCC_INIT_STRING, INIT_STRING_LEN);

	    	/* Set version_string */
		memcpy_toio(bookeeping_data->version_string, MCC_VERSION_STRING, VERSION_STRING_LEN);

		/* Initialize the free list */
		bookeeping_data->free_list.head = (MCC_RECEIVE_BUFFER *)VIRT_TO_MQX(&bookeeping_data->r_buffers[0]);
		bookeeping_data->free_list.tail = (MCC_RECEIVE_BUFFER *)VIRT_TO_MQX(&bookeeping_data->r_buffers[MCC_ATTR_NUM_RECEIVE_BUFFERS-1]);

		/* Initialize receive buffers */
		for(i=0; i<MCC_ATTR_NUM_RECEIVE_BUFFERS-1; i++)
		{
			bookeeping_data->r_buffers[i].next = (MCC_RECEIVE_BUFFER *)VIRT_TO_MQX(&bookeeping_data->r_buffers[i+1]);
		}
		bookeeping_data->r_buffers[MCC_ATTR_NUM_RECEIVE_BUFFERS-1].next = null;
	}
	else
		printk(KERN_DEBUG "at entry, bookeeping_data was initialized\n");

	for(i=0; i<MCC_NUM_CORES; i++)
	{
		for(j=0; j<MCC_MAX_OUTSTANDING_SIGNALS; j++)
		{
			bookeeping_data->signals_received[i][j].type = 11;
			bookeeping_data->signals_received[i][j].destination.core = 22;
			bookeeping_data->signals_received[i][j].destination.node = 33;
			bookeeping_data->signals_received[i][j].destination.port = 44;
		}
	}

	// critical region for shared memory ends
	mcc_sema4_release();

	return return_value;
}
int awc_i365_probe_once(struct i365_socket * s ) {


    int caps=i365_in(s, 0);
    int ret;
    unsigned long jiff;
//	short rev	= 0x3000;
    unsigned char cis [0x3e3];
    unsigned char * mem = phys_to_virt(0xd000);
    int i;
    int port ;

    DEBUG(1," i365 control ID %x \n", caps);

    if (caps & 0xC) {
        return 1;
    };

    ret = i365_in(s, 0x1);

    if ((ret & 0xC0) != 0xC0) {
        printk("card in socket %d port %x not in known state, %x \n",
               s->socket, s->offset_port, ret );
        return -1;
    };


    awc_i365_card_release(s);


    mdelay(100);

    i365_out(s, 0x2, 0x10 ); 	// power enable
    mdelay(200);

    i365_out(s, 0x2, 0x10 | 0x01 | 0x04 | 0x80);	//power enable

    mdelay(250);

    if (!s->irq)
        s->irq = 11;

    i365_out(s, 0x3, 0x40 | 0x20 | s->irq);

    jiff = jiffies;

    while (jiffies-jiff < HZ )
        if (i365_in(s,0x1) & 0x20)
            break;

    if (! (i365_in(s,0x1) & 0x20) ) {
        printk("irq enable timeout on socket %x \n", s->socket);
        return -1;
    };

    i365_out(s,0x10,0xd0);
    i365_out(s,0x11,0x0);
    i365_out(s,0x12,0xd0);
    i365_out(s,0x13,0x0);
    i365_out(s,0x14,0x30 );
    i365_out(s,0x15,0x3f | 0x40);		// enab mem reg bit
    i365_out(s,0x06,0x01);			// enab mem

    mdelay(10);

    cis[0] = 0x45;

//	memcpy_toio( 0xd3e0, &(cis[0]),0x1);

//	mem[0x3e0] = 0x0;
//	mem[0] = 0x45;

    mem[0x3e0] = 0x45;

    mdelay(10);

    memcpy_fromio(cis, mem, 0x3e0);	/* mem == phys_to_virt(0xd000) */

    for (i = 0; i <= 0x3e2; i++)
        printk("%02x", mem[i]);
    for (i = 0; i <= 0x3e2; i++)
        printk("%c", mem[i]);

    i=0;
    while (i < 0x3e0) {
        if (cis[i] == 0xff)
            break;
        if (cis[i] != 0x20 ) {
            i = i + 2 + cis[i+1];
            continue;
        } else {
            s->manufacturer = cis[i+2] | (cis[i+3]<<8);
            s->product	= cis[i+4] | (cis[i+5]<<8);
            break;
        };
        i++;
    };

    DEBUG(1,"socket %x manufacturer %x product %x \n",
          s->socket, s->manufacturer,s->product);

    i365_out(s,0x07, 0x1 | 0x2); 		// enable io 16bit
    mdelay(1);
    port = s->io;
    i365_out(s,0x08, port & 0xff);
    i365_out(s,0x09, (port & 0xff00)/ 0x100);
    i365_out(s,0x0A, (port+port_range) & 0xff);
    i365_out(s,0x0B, ((port+port_range) & 0xff00) /0x100);

    i365_out(s,0x06, 0x40); 		// enable io window

    mdelay(1);

    i365_out(s,0x3e0,0x45);

    outw(0x10, s->io);

    jiff = jiffies;
    while (!(inw(s->io + 0x30) & 0x10)) {

        if (jiffies - jiff > HZ ) {

            printk("timed out waitin for command ack \n");
            break;
        }
    };


    outw(0x10, s->io + 0x34);
    mdelay(10);

    return 0;

};
Example #25
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
Example #27
void ram_copyfromcard(eicon_card *card, void *adrto, void *adr, int len) {
    memcpy_fromio(adrto, adr, len);
}
void cdb89712_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	// printk ("cdb89712_copy_from: 0x%x@0x%x -> 0x%x\n", len, from, to);
	memcpy_fromio(to, map->map_priv_1 + from, len);
}
Example #29
static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch)
{
	int qleft = 0;
	u8 linestatus = 0;
	u8 error_mask = 0;
	int n = 0;
	int total = 0;
	u16 head;
	u16 tail;

	if (!ch)
		return;

	/* cache head and tail of queue */
	head = ch->ch_r_head & RQUEUEMASK;
	tail = ch->ch_r_tail & RQUEUEMASK;

	/* Get our cached LSR */
	linestatus = ch->ch_cached_lsr;
	ch->ch_cached_lsr = 0;

	/* Store how much space we have left in the queue */
	if ((qleft = tail - head - 1) < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * If the UART is not in FIFO mode, force the FIFO copy to
	 * NOT be run, by setting total to 0.
	 *
	 * On the other hand, if the UART IS in FIFO mode, then ask
	 * the UART to give us an approximation of data it has RX'ed.
	 */
	if (!(ch->ch_flags & CH_FIFO_ENABLED))
		total = 0;
	else {
		total = readb(&ch->ch_neo_uart->rfifo);

		/*
		 * EXAR chip bug - RX FIFO COUNT - Fudge factor.
		 *
		 * This resolves a problem/bug with the Exar chip that sometimes
		 * returns a bogus value in the rfifo register.
		 * The count can be any where from 0-3 bytes "off".
		 * Bizarre, but true.
		 */
		total -= 3;
	}

	/*
	 * Finally, bound the copy to make sure we don't overflow
	 * our own queue...
	 * The byte-by-byte copy loop below this one will
	 * deal with the queue overflow possibility.
	 */
	total = min(total, qleft);

	while (total > 0) {
		/*
		 * Grab the linestatus register, we need to check
		 * to see if there are any errors in the FIFO.
		 */
		linestatus = readb(&ch->ch_neo_uart->lsr);

		/*
		 * Break out if there is a FIFO error somewhere.
		 * This will allow us to go byte by byte down below,
		 * finding the exact location of the error.
		 */
		if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
			break;

		/* Make sure we don't go over the end of our queue */
		n = min(((u32) total), (RQUEUESIZE - (u32) head));

		/*
		 * Cut down n even further if needed, this is to fix
		 * a problem with memcpy_fromio() with the Neo on the
		 * IBM pSeries platform.
		 * 15 bytes max appears to be the magic number.
		 */
		n = min((u32) n, (u32) 12);

		/*
		 * Since we are grabbing the linestatus register, which
		 * will reset some bits after our read, we need to ensure
		 * we don't miss our TX FIFO empties.
		 */
		if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR))
			ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);

		linestatus = 0;

		/* Copy data from uart to the queue */
		memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
		/*
		 * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
		 * that all the data currently in the FIFO is free of
		 * breaks and parity/frame/orun errors.
		 */
		memset(ch->ch_equeue + head, 0, n);

		/* Add to and flip head if needed */
		head = (head + n) & RQUEUEMASK;
		total -= n;
		qleft -= n;
		ch->ch_rxcount += n;
	}

	/*
	 * Create a mask to determine whether we should
	 * insert the character (if any) into our queue.
	 */
	if (ch->ch_c_iflag & IGNBRK)
		error_mask |= UART_LSR_BI;

	/*
	 * Now cleanup any leftover bytes still in the UART.
	 * Also deal with any possible queue overflow here as well.
	 */
	while (1) {

		/*
		 * Its possible we have a linestatus from the loop above
		 * this, so we "OR" on any extra bits.
		 */
		linestatus |= readb(&ch->ch_neo_uart->lsr);

		/*
		 * If the chip tells us there is no more data pending to
		 * be read, we can then leave.
		 * But before we do, cache the linestatus, just in case.
		 */
		if (!(linestatus & UART_LSR_DR)) {
			ch->ch_cached_lsr = linestatus;
			break;
		}

		/* No need to store this bit */
		linestatus &= ~UART_LSR_DR;

		/*
		 * Since we are grabbing the linestatus register, which
		 * will reset some bits after our read, we need to ensure
		 * we don't miss our TX FIFO empties.
		 */
		if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
			linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
			ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
		}

		/*
		 * Discard character if we are ignoring the error mask.
		 */
		if (linestatus & error_mask) {
			u8 discard;
			linestatus = 0;
			memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
			continue;
		}

		/*
		 * If our queue is full, we have no choice but to drop some data.
		 * The assumption is that HWFLOW or SWFLOW should have stopped
		 * things way way before we got to this point.
		 *
		 * I decided that I wanted to ditch the oldest data first,
		 * I hope that's okay with everyone? Yes? Good.
		 */
		while (qleft < 1) {
			jsm_dbg(READ, &ch->ch_bd->pci_dev,
				"Queue full, dropping DATA:%x LSR:%x\n",
				ch->ch_rqueue[tail], ch->ch_equeue[tail]);

			ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
			ch->ch_err_overrun++;
			qleft++;
		}

		memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
		ch->ch_equeue[head] = (u8) linestatus;

		jsm_dbg(READ, &ch->ch_bd->pci_dev, "DATA/LSR pair: %x %x\n",
			ch->ch_rqueue[head], ch->ch_equeue[head]);

		/* Ditch any remaining linestatus value. */
		linestatus = 0;

		/* Add to and flip head if needed */
		head = (head + 1) & RQUEUEMASK;

		qleft--;
		ch->ch_rxcount++;
	}

	/*
	 * Write new final heads to channel structure.
	 */
	ch->ch_r_head = head & RQUEUEMASK;
	ch->ch_e_head = head & EQUEUEMASK;
	jsm_input(ch);
}
static void irqrx_handler(struct net_device *dev)
{
	ibmlana_priv *priv = netdev_priv(dev);
	rda_t rda;
	u32 rdaaddr, lrdaaddr;

	/* loop until ... */

	while (1) {
		/* read descriptor that was next to be filled by SONIC */

		rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
		lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
		memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));

		/* iron out upper word halves of fields we use - SONIC will duplicate
		   bits 0..15 to 16..31 */

		rda.status &= 0xffff;
		rda.length &= 0xffff;
		rda.startlo &= 0xffff;

		/* stop if the SONIC still owns it, i.e. there is no data for us */

		if (rda.inuse)
			break;

		/* good packet? */

		else if (rda.status & RCREG_PRX) {
			struct sk_buff *skb;

			/* fetch buffer */

			skb = dev_alloc_skb(rda.length + 2);
			if (skb == NULL)
				priv->stat.rx_dropped++;
			else {
				/* copy out data */

				memcpy_fromio(skb_put(skb, rda.length),
					       priv->base +
					       rda.startlo, rda.length);

				/* set up skb fields */

				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_NONE;

				/* bookkeeping */
				dev->last_rx = jiffies;
				priv->stat.rx_packets++;
				priv->stat.rx_bytes += rda.length;

				/* pass to the upper layers */
				netif_rx(skb);
			}
		}

		/* otherwise check error status bits and increase statistics */

		else {
			priv->stat.rx_errors++;
			if (rda.status & RCREG_FAER)
				priv->stat.rx_frame_errors++;
			if (rda.status & RCREG_CRCR)
				priv->stat.rx_crc_errors++;
		}

		/* descriptor processed, will become new last descriptor in queue */

		rda.link = 1;
		rda.inuse = 1;
		memcpy_toio(priv->base + rdaaddr, &rda,
			     sizeof(rda_t));

		/* set up link and EOL = 0 in currently last descriptor. Only write
		   the link field since the SONIC may currently already access the
		   other fields. */

		memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4);

		/* advance indices */

		priv->lastrxdescr = priv->nextrxdescr;
		if ((++priv->nextrxdescr) >= priv->rxbufcnt)
			priv->nextrxdescr = 0;
	}
}
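
As in the ibmlana transmit path at the top of this page, the receive side moves frames directly between adapter RAM and kernel memory. The core of the rx copy reduces to this sketch (assuming base is the __iomem mapping of the shared RAM window; the helper name is illustrative):

static void copy_rx_frame(struct sk_buff *skb, void __iomem *base,
			  unsigned int off, unsigned int len)
{
	/* reserve len bytes in the skb, then fill them from adapter RAM */
	memcpy_fromio(skb_put(skb, len), base + off, len);
}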