Example 1
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, requeue this one.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
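  /* Record the pending flush in the pipeline status.  */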
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}
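For context, a minimal caller sketch follows. Only the field names (reqno, address, u.invalidate.all, u.invalidate.flush) come from handle_req_invalidate above; the allocator and enqueue helpers are assumptions, not part of the code shown.

/* Hypothetical sketch: queue a flush-and-invalidate of a single line.
   new_cache_request() and pipeline_add_request() are assumed helpers.  */
static void
request_flush_line (FRV_CACHE *cache, unsigned reqno, SI address, int pipe)
{
  FRV_CACHE_REQUEST *req = new_cache_request ();  /* assumed allocator */

  req->reqno = reqno;
  req->address = address;
  req->u.invalidate.all = 0;   /* just this line, not the whole cache */
  req->u.invalidate.flush = 1; /* write dirty data back before invalidating */
  pipeline_add_request (&cache->pipeline[pipe], req); /* assumed enqueue */
}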
Example 2
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space.  This is done so we
 * get a unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping.  This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page-sized areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/* set refcount=1 on all pages in an order>0 allocation so that
	 * vfree() will actually free all pages that were allocated
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);

		for (i = 1; i < (1 << order); i++)
			set_page_count(rpage + i, 1);
	}

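	/* map each physical page at its new uncached virtual address */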
	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/* we need to ensure that there are no cachelines in use, or worse,
	 * dirty, in this area; this can't be done until after the virtual
	 * address mappings have been created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
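A hedged usage sketch follows: how a driver might obtain a coherent buffer from consistent_alloc(). All driver-side names are illustrative; only consistent_alloc() itself comes from the code above.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#define MYDEV_RING_BYTES	4096	/* illustrative buffer size */

static void *mydev_ring;		/* uncached CPU virtual address */
static dma_addr_t mydev_ring_dma;	/* bus address for the device */

static int mydev_ring_init(void)
{
	mydev_ring = consistent_alloc(GFP_KERNEL, MYDEV_RING_BYTES,
				      &mydev_ring_dma);
	if (!mydev_ring)
		return -ENOMEM;

	memset(mydev_ring, 0, MYDEV_RING_BYTES);
	/* hand mydev_ring_dma to the device's descriptor-base register */
	return 0;
}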
Example 3
/*
 * make an area consistent for DMA by writing back and/or invalidating
 * the cache, as required by the transfer direction
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long) vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		frv_cache_invalidate(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		frv_dcache_writeback(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		frv_dcache_writeback(start, end);
		frv_cache_invalidate(start, end);
		break;
	}
}
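A hedged usage sketch: bracketing streaming DMA with consistent_sync(). The wrapper names are illustrative; only consistent_sync() and the PCI_DMA_* directions come from the code above.

/* Illustrative wrappers (names hypothetical) around consistent_sync().  */
static void mydev_dma_to_cpu(void *buf, size_t len)
{
	/* the device will write buf: discard stale cachelines so the CPU
	 * later reads the DMA'd data from memory rather than the cache
	 */
	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
}

static void mydev_cpu_to_dma(void *buf, size_t len)
{
	/* the CPU filled buf: write dirty cachelines back so the device
	 * sees the data in memory
	 */
	consistent_sync(buf, len, PCI_DMA_TODEVICE);
}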
Example 4
void
frvbf_check_recovering_store (
  SIM_CPU *current_cpu, PCADDR address, SI regno, int size, int is_float
)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  int reg_ix;

  CPU_RSTR_INVALIDATE(current_cpu) = 0;

  for (reg_ix = next_valid_nesr (current_cpu, NO_NESR);
       reg_ix != NO_NESR;
       reg_ix = next_valid_nesr (current_cpu, reg_ix))
    {
      if (address == GET_H_SPR (H_SPR_NEEAR0 + reg_ix))
	{
	  SI nesr = GET_NESR (reg_ix);
	  int nesr_drn = GET_NESR_DRN (nesr);
	  BI nesr_fr = GET_NESR_FR (nesr);
	  SI remain;

	  /* Invalidate cache block containing this address.
	     If we need to count cycles, then the cache operation will be
	     initiated from the model profiling functions.
	     See frvbf_model_....  */
	  if (model_insn)
	    {
	      CPU_RSTR_INVALIDATE(current_cpu) = 1;
	      CPU_LOAD_ADDRESS (current_cpu) = address;
	    }
	  else
	    frv_cache_invalidate (cache, address, 1/* flush */);

	  /* Copy the stored value to the register indicated by NESR.DRN.  */
	  for (remain = size; remain > 0; remain -= 4)
	    {
	      SI value;

	      if (is_float)
		value = GET_H_FR (regno);
	      else
		value = GET_H_GR (regno);

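	      /* A byte or halfword store deposits only its low-order bits.  */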
	      switch (size)
		{
		case 1:
		  value &= 0xff;
		  break;
		case 2:
		  value &= 0xffff;
		  break;
		default:
		  break;
		}

	      if (nesr_fr)
		sim_queue_fn_sf_write (current_cpu, frvbf_h_fr_set, nesr_drn,
				       value);
	      else
		sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, nesr_drn,
				       value);

	      nesr_drn++;
	      regno++;
	    }
	  break; /* Only consider the first matching register.  */
	}
    } /* Loop over active NEEAR registers.  */
}
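Finally, a hedged caller sketch: how a store handler in the simulator might invoke this check after committing a word store. The wrapper name is illustrative; only frvbf_check_recovering_store() and its parameter meanings come from the code above.

/* Hypothetical sketch: probe the recovery logic after committing a
   4-byte store from a general register.  */
static void
store_word_and_check (SIM_CPU *current_cpu, PCADDR address, SI src_regno)
{
  /* ... perform the actual store of GR[src_regno] to address here ...  */

  frvbf_check_recovering_store (current_cpu, address, src_regno,
				4 /* size in bytes */,
				0 /* general, not float, register */);
}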