Example #1
static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;
	int mask_type;

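	/* Only the default mask type (0) is accepted, and the requested
	 * type must match the type the memory was allocated with. */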
	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

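	/* Point every GATT entry in the range back at the scratch page. */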
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
Example #2
static int ati_insert_memory(struct agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;
	int mask_type;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

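	/* Refuse mappings that would run past the end of the GATT. */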
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

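	/* Verify that every target GATT entry is currently unused. */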
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

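	/* Flush CPU caches once so the GART sees up-to-date page contents. */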
	if (!mem->is_flushed) {
		/*CACHE_FLUSH(); */
		global_cache_flush();
		mem->is_flushed = true;
	}

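	/* Write the masked physical address of each page into its GATT slot. */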
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge,	
						       page_to_phys(mem->pages[i]),
						       mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
	}
	readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
Example #3
static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i;
	int mask_type;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

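	/* Restore each unmapped GATT entry to the scratch page. */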
	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
Example #4
static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j;
	int mask_type;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

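	/* Refuse mappings that would run past the usable (non-reserved)
	 * GATT entries. */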
	if ((pg_start + mem->page_count) >
		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
		return -EINVAL;

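	/* Fail with -EBUSY if any target GATT entry is already mapped. */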
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
			return -EBUSY;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}
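	/* Program each GATT slot with the masked physical address of its page. */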
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			       page_to_phys(mem->pages[i]), mask_type),
			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
	}

	/* PCI Posting. */
	readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}