Example no. 1
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, unsigned long len, int op)
{
	struct vm_area_struct *vma;

	if ((op < 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
		return -EINVAL;

	/*
	 * Verify that the specified address region actually belongs
	 * to this process.
	 */
	if (addr + len < addr)
		return -EFAULT;
	vma = find_vma (current->mm, addr);
	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
		return -EFAULT;

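	/*
	 * CACHEFLUSH_D_PURGE is CACHEFLUSH_D_INVAL | CACHEFLUSH_D_WB, so the
	 * mask below selects the data-cache part of the requested operation.
	 */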
	switch (op & CACHEFLUSH_D_PURGE) {
		case CACHEFLUSH_D_INVAL:
			__flush_invalidate_region(addr, len);
			break;
		case CACHEFLUSH_D_WB:
			__flush_wback_region(addr, len);
			break;
		case CACHEFLUSH_D_PURGE:
			__flush_purge_region(addr, len);
			break;
	}
	if (op & CACHEFLUSH_I) {
		flush_icache_all();
	}

	return 0;
}
Example no. 2
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

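	/*
	 * If the source page is still mapped in user space and its D-cache
	 * state is known to be clean, read it through kmap_coherent(), which
	 * maps the page at the same cache colour as the user address.
	 */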
	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
Example no. 3
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

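	/*
	 * If the kernel mapping and the user address can alias in the
	 * D-cache, purge the kernel-side lines so the user mapping sees
	 * the zeroed page rather than stale data.
	 */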
	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
Example no. 4
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
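		/*
		 * If the page was not yet marked clean in the D-cache, purge
		 * its kernel mapping now and record it as clean.
		 */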
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}
Example no. 5
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
Example no. 6
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
Example no. 7
static void nand_write_buf_cached_block(struct mtd_info *mtd,
					const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	struct stm_nand_emi *data = chip->priv;
	unsigned long irq_flags;

	while (len > 0) {
		local_irq_save(irq_flags);
		memcpy_toio(data->io_data, buf, min(len, CACHEDIO_BLOCK_SIZE));
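		/* Push the just-copied block out of the D-cache so it
		 * actually reaches the NAND controller's data window. */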
		__flush_purge_region(data->io_data, CACHEDIO_BLOCK_SIZE);
		local_irq_restore(irq_flags);

		buf += CACHEDIO_BLOCK_SIZE;
		len -= CACHEDIO_BLOCK_SIZE;
	}
}
Example no. 8
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    struct sh_eth_txdesc *txdesc;
    u32 entry;
    unsigned long flags;

    spin_lock_irqsave(&mdp->lock, flags);
    if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
        if (!sh_eth_txfree(ndev)) {
            netif_stop_queue(ndev);
            spin_unlock_irqrestore(&mdp->lock, flags);
            return 1;
        }
    }
    spin_unlock_irqrestore(&mdp->lock, flags);

    entry = mdp->cur_tx % TX_RING_SIZE;
    mdp->tx_skbuff[entry] = skb;
    txdesc = &mdp->tx_ring[entry];
    txdesc->addr = (u32)(skb->data);
    /* soft swap. */
    swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
    /* write back */
    __flush_purge_region(skb->data, skb->len);
    if (skb->len < ETHERSMALL)
        txdesc->buffer_length = ETHERSMALL;
    else
        txdesc->buffer_length = skb->len;

    if (entry >= TX_RING_SIZE - 1)
        txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
    else
        txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

    mdp->cur_tx++;

    if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
        ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

    ndev->trans_start = jiffies;

    return 0;
}
Example no. 9
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
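	/*
	 * Cache maintenance needs a cacheable virtual address; in 29-bit
	 * mode sh_cacheop_vaddr() returns the cached (P1) alias.
	 */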
	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));

	switch (dir) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
Example no. 10
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
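		/*
		 * A set PG_dcache_dirty bit means flush_dcache_page() deferred
		 * the flush; do it now if the kernel and user addresses alias.
		 */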
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_purge_region((void *)addr, PAGE_SIZE);
		}
	}
}
Example no. 11
void sh_sync_dma_for_device(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	void *addr;

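	/* In 29-bit mode, operate on the cached (P1) alias of the buffer. */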
	addr = __in_29bit_mode() ?
	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
Example no. 12
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
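	/*
	 * The cache instructions take virtual addresses: use the cached P1
	 * alias, except on SH-5 or with PMB where vaddr is used directly.
	 */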
#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
	void *p1addr = vaddr;
#else
	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
Example no. 13
void *dreamcast_consistent_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long buf;

	if (dev && dev->bus != &pci_bus_type)
		return NULL;

	if (gapspci_dma_used + size > GAPSPCI_DMA_SIZE)
		return ERR_PTR(-EINVAL);

	buf = GAPSPCI_DMA_BASE + gapspci_dma_used;

	gapspci_dma_used = PAGE_ALIGN(gapspci_dma_used+size);

	*dma_handle = (dma_addr_t)buf;

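	/* Hand back an uncached (P2) alias so CPU accesses bypass the cache. */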
	buf = P2SEGADDR(buf);

	/* Flush the dcache before we hand off the buffer */
	__flush_purge_region((void *)buf, size);

	return (void *)buf;
}
Example no. 14
/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache but
 * without it we get occasional "Memory fault" when loading a program.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
    __flush_purge_region(page_address(page), PAGE_SIZE);
}
Example no. 15
/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache but
 * without it we get occasional "Memory fault" when loading a program.
 */
static void sh7705_flush_icache_page(void *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}