Example no. 1
0
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	/* Point every entry in the new page at the scratch page. Note the
	 * direct store through the uncached mapping; the AMD variant below
	 * uses writel()/readl() for the same initialization. */
	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		page_map->remapped[i] = agp_bridge->scratch_page;
	}

	return 0;
}
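
For reference, a minimal sketch of the matching teardown, assuming the same struct serverworks_page_map layout as above; the helper name is hypothetical and simply undoes the setup steps in reverse order:

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);			/* drop the uncached mapping */
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);	/* release the backing page */
}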
Example no. 2
0
static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
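
The readl() after each writel() here is the standard defense against PCI write posting: reading any location on the same device forces earlier posted writes to complete. In generic form (names illustrative):

	/* PCI writes may be posted (buffered by bridges). A read from the
	 * same device flushes earlier posted writes before proceeding. */
	writel(value, io_addr);
	(void) readl(io_addr);	/* flush the posted write */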
Example no. 3
0
static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j;

	if ((type != 0) || (mem->type != 0))
		return -EINVAL;

	if ((pg_start + mem->page_count) >
		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
		return -EINVAL;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
			return -EBUSY;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mem->type),
			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
		readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j);	/* PCI Posting. */
	}
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
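
The inverse operation is not shown in this example; a plausible sketch, mirroring the scratch-page reset that serverworks_remove_memory performs in Example no. 15 (the exact error handling is an assumption based on the insert path above):

static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i;

	if ((type != 0) || (mem->type != 0))
		return -EINVAL;

	/* Point every entry back at the scratch page, then flush the TLB. */
	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page,
		       agp_bridge->gatt_table+nvidia_private.pg_offset+i);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}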
Example no. 4
0
static int ati_create_page_map(ati_page_map *page_map)
{
	int i, err = 0;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	err = map_page_into_agp(virt_to_page(page_map->real));
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL || err) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	/*CACHE_FLUSH();*/
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
Example no. 5
0
static void ali_destroy_page(struct page *page, int flags)
{
	if (!page)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		global_cache_flush();	/* is this really needed?  --hch */

	agp_generic_destroy_page(page, flags);
}
Example no. 6
0
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	int mask_type;
	struct agp_bridge_data *bridge = mem->bridge;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != mem->type)
		return -EINVAL;
	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)
		return -EINVAL;

	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mask_type);

		BUG_ON(tmp & 0xffffff0000000ffcULL);
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |= (tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
	}
	amd64_tlbflush(mem);
	return 0;
}
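
To make the bit shuffling concrete, here is an illustrative packing of one hypothetical 4 KB-aligned physical address below 2^40 (the masks and flag macros are the ones used in the function above):

	/* Hypothetical page address: 4 KB aligned and below 1 << 40,
	 * so it passes the BUG_ON() check above. */
	unsigned long long addr = 0x8765432000ULL;
	u32 pte;

	pte  = (addr & 0x000000ff00000000ULL) >> 28;	/* bits 39:32 -> PTE bits 11:4 (0x870) */
	pte |= (addr & 0x00000000fffff000ULL);		/* bits 31:12 stay in place (0x65432000) */
	pte |= GPTE_VALID | GPTE_COHERENT;		/* pte == 0x65432870 | flags */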
Example no. 7
0
static int ati_insert_memory(struct agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;
	int mask_type;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		/*CACHE_FLUSH(); */
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge,
						       page_to_phys(mem->pages[i]),
						       mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
	}
	readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
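
GET_GATT() and GET_GATT_OFF() hide a two-level lookup: the upper address bits select one remapped GATT page via a page directory, the lower bits select the entry within it. A sketch of how these macros are typically defined for this driver, assuming a 4 MB span per GATT page and an ati_generic_private bookkeeping struct (names assumed):

#define GET_PAGE_DIR_OFF(addr)	((addr) >> 22)			/* which 4 MB slot */
#define GET_PAGE_DIR_IDX(addr)	(GET_PAGE_DIR_OFF(addr) - \
				 GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr)	(((addr) & 0x003ff000) >> 12)	/* entry within the page */
#define GET_GATT(addr)		(ati_generic_private.gatt_pages[GET_PAGE_DIR_IDX(addr)]->remapped)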
Example no. 8
0
static int
hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
{
	struct _hp_private *hp = &hp_private;
	int i, k;
	off_t j, io_pg_start;
	int io_pg_count;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	io_pg_start = hp->io_pages_per_kpage * pg_start;
	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
		return -EINVAL;
	}

	j = io_pg_start;
	while (j < (io_pg_start + io_pg_count)) {
		if (hp->gatt[j]) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		unsigned long paddr;

		paddr = mem->memory[i];
		for (k = 0;
		     k < hp->io_pages_per_kpage;
		     k++, j++, paddr += hp->io_page_size) {
			hp->gatt[j] =
				agp_bridge->driver->mask_memory(agp_bridge,
					paddr, type);
		}
	}

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
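
The nested loop exists because one kernel page can span several smaller GART I/O pages, so each kernel page produces io_pages_per_kpage consecutive GATT entries. With hypothetical sizes the arithmetic looks like this:

	/* Hypothetical ia64-style configuration: 16 KB kernel pages
	 * backed by 4 KB GART I/O pages. */
	unsigned long io_page_size       = 4096;			/* hp->io_page_size */
	unsigned long io_pages_per_kpage = PAGE_SIZE / io_page_size;	/* 4 if PAGE_SIZE == 16384 */

	/* Each kernel page then yields 4 GATT entries whose physical
	 * addresses step by io_page_size, exactly as the k-loop does. */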
Example no. 9
0
static void m1541_cache_flush(void)
{
	int i, page_count;
	u32 temp;

	global_cache_flush();

	page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
	/* Run each page of the GATT through the M1541 flush register. */
	for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
				&temp);
		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
				  (agp_bridge->gatt_bus_addr + i)) |
				 ALI_CACHE_FLUSH_EN));
	}
}
Example no. 10
0
static void m1541_destroy_page(struct page *page, int flags)
{
	u32 temp;

	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP) {
		global_cache_flush();

		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
				       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
					 page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
	}
	agp_generic_destroy_page(page, flags);
}
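
Spelled out, the config-space sequence above packs the page's physical address and an enable bit into a single flush register (macro semantics as used above; the split into steps is only for illustration):

	u32 temp, val;

	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
	val  = temp & ALI_CACHE_FLUSH_ADDR_MASK;	/* preserve the masked bits of the old value */
	val |= page_to_phys(page);			/* physical address of the page to flush */
	val |= ALI_CACHE_FLUSH_EN;			/* trigger the flush */
	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, val);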
Example no. 11
0
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long tmp;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[j]))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(mem->memory[i], mem->type);

		BUG_ON(tmp & 0xffffff0000000ffcULL);
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |= (tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		agp_bridge->gatt_table[j] = pte;
	}
	amd64_tlbflush(mem);
	return 0;
}
Example no. 12
0
static int serverworks_insert_memory(struct agp_memory *mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}
Example no. 13
0
static int serverworks_insert_memory(struct agp_memory *mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, cur_gatt[GET_GATT_OFF(addr)])) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		cur_gatt[GET_GATT_OFF(addr)] =
			agp_bridge->driver->mask_memory(mem->memory[i], mem->type);
	}
	serverworks_tlbflush(mem);
	return 0;
}
Example no. 14
0
static int ati_insert_memory(struct agp_memory * mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		/*CACHE_FLUSH(); */
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
Example no. 15
0
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	/* Flush CPU caches and the GART TLB before rewriting the entries,
	 * and flush the TLB again once they all point at the scratch page. */
	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}
Example no. 16
0
static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j;
	int mask_type;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if ((pg_start + mem->page_count) >
		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
		return -EINVAL;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
			return -EBUSY;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			       page_to_phys(mem->pages[i]), mask_type),
			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
	}

	/* PCI Posting. */
	readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
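
None of these hooks is called directly; requests reach them through the generic AGP layer. A hedged sketch of the call path using the generic entry points agp_allocate_memory() and agp_bind_memory() (the surrounding error handling is illustrative):

	struct agp_memory *mem;

	/* Allocate 16 pages of type 0 and bind them at GATT offset 0.
	 * agp_bind_memory() ends up invoking the bridge driver's
	 * insert_memory hook, e.g. nvidia_insert_memory() above. */
	mem = agp_allocate_memory(agp_bridge, 16, 0);
	if (mem == NULL)
		return -ENOMEM;

	if (agp_bind_memory(mem, 0) != 0) {
		agp_free_memory(mem);
		return -EINVAL;
	}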