Example 1
static void
pscnv_swapping_memdump(struct pscnv_bo *bo)
{
	struct drm_device *dev = bo->dev;
	uint32_t pagenum;
	uint32_t i;
	uint32_t *mem;
	
	if (!bo->pages || !bo->pages[0]) {
		NV_INFO(dev, "pscnv_swapping_memdump: can not memdump bo with "
			     "cookie=%x, it has no pages attached\n", bo->cookie);
		return;
	}
	
	// bo->size is a multiple of page size
	for (pagenum = 0; pagenum < bo->size >> PAGE_SHIFT; pagenum++) {
		NV_INFO(dev, "=== DUMP BO %08x/%d page %u\n", bo->cookie, bo->serial, pagenum);
		mem = kmap(bo->pages[pagenum]);
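		/* dump only the first 256 32-bit words (1 KiB) of the page, four per line */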
		for (i = 0; i < 256; i += 4) {
			NV_INFO(dev, "%08x %08x %08x %08x\n",
				mem[i], mem[i+1], mem[i+2], mem[i+3]);
		}
		kunmap(bo->pages[pagenum]);
	}
}
Example 2
int pscnv_ramht_insert(struct pscnv_ramht *ramht, uint32_t handle, uint32_t context) {
	/* XXX: check if the object exists already... */
	struct drm_nouveau_private *dev_priv = ramht->vo->dev->dev_private;
	uint32_t hash = pscnv_ramht_hash(ramht, handle);
	uint32_t start = hash * 8;
	uint32_t pos = start;
	if (pscnv_ramht_debug >= 2)
		NV_INFO(ramht->vo->dev, "Handle %x hash %x\n", handle, hash);
	spin_lock (&ramht->lock);
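	/* linear probing: scan forward from the hashed slot, wrapping at the end of the table */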
	do {
		if (!nv_rv32(ramht->vo, ramht->offset + pos + 4)) {
			nv_wv32(ramht->vo, ramht->offset + pos, handle);
			nv_wv32(ramht->vo, ramht->offset + pos + 4, context);
			dev_priv->vm->bar_flush(ramht->vo->dev);
			spin_unlock (&ramht->lock);
			if (pscnv_ramht_debug >= 1)
				NV_INFO(ramht->vo->dev, "Adding RAMHT entry for object %x at %x, context %x\n", handle, pos, context);
			return 0;
		}
		pos += 8;
		if (pos == 8 << ramht->bits)
			pos = 0;
	} while (pos != start);
	spin_unlock (&ramht->lock);
	NV_ERROR(ramht->vo->dev, "No RAMHT space for object %x\n", handle);
	return -ENOMEM;
}
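
For reference, the starting slot above comes from pscnv_ramht_hash(). A minimal sketch of such a hash, assuming the usual RAMHT scheme of XOR-folding the handle in ramht->bits-wide chunks (the in-tree pscnv implementation may differ):

static uint32_t
pscnv_ramht_hash_sketch(struct pscnv_ramht *ramht, uint32_t handle)
{
	uint32_t hash = 0;

	/* XOR-fold the handle in ramht->bits-wide chunks */
	while (handle) {
		hash ^= handle & ((1 << ramht->bits) - 1);
		handle >>= ramht->bits;
	}
	return hash;
}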
Example 3
static void
pscnv_swapping_swap_out(void *data, struct pscnv_client *cl)
{
	struct drm_device *dev = cl->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_swaptask *st = data;
	struct pscnv_chunk *cnk;
	int ret;
	size_t i;
	
	BUG_ON(st->tgt != cl);
	
	if (pscnv_swapping_debug >= 2) {
		NV_INFO(dev, "pscnv_swapping_swap_out: [client %d] begin swaptask "
				"%d with %lu chunks\n", cl->pid, st->serial,
				st->selected.size);
	}
	
	dev_priv->last_mem_alloc_change_time = jiffies;
	
	for (i = 0; i < st->selected.size; i++) {
		cnk = st->selected.chunks[i];
		
		if (pscnv_chunk_expect_alloc_type(cnk, PSCNV_CHUNK_VRAM,
						"pscnv_swapping_swap_out")) {
			continue;
		}
		
		/* for now: process one chunk after the other.
		 * pscnv_vram_to_host() increases the swapped-out counter and,
		 * on failure, vram_demand */
		ret = pscnv_vram_to_host(cnk);
		if (ret) {
			NV_ERROR(dev, "pscnv_swapping_swap_out: [client %d] vram_to_host"
				" failed for chunk %08x/%d-%u\n", cl->pid,
				cnk->bo->cookie, cnk->bo->serial, cnk->idx);
			/* continue and try with next */
		}

		mutex_lock(&dev_priv->clients->lock);
		pscnv_chunk_list_remove_unlocked(&cl->swap_pending, cnk);
		if (ret) {
			/* failure, return to swapping_options */
			pscnv_chunk_list_add_unlocked(&cl->swapping_options, cnk);
		} else {
			pscnv_chunk_list_add_unlocked(&cl->already_swapped, cnk);
		}
		mutex_unlock(&dev_priv->clients->lock);
	}
	
	if (pscnv_swapping_debug >= 2) {
		NV_INFO(dev, "pscnv_swapping_swap_out: [client %d] end swaptask %d\n",
				cl->pid, st->serial);
	}
	
	complete(&st->completion);
}
Example 4
static void
nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
	NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
		nv_rd32(dev, base + 0x400));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
		nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
		nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}
Example 5
int pscnv_mm_alloc(struct pscnv_mm *mm, uint64_t size, uint32_t flags, uint64_t start, uint64_t end, struct pscnv_mm_node **res) {
	uint32_t psize;
	struct pscnv_mm_node *last = 0;
	int ret;
	if (flags & PSCNV_MM_LP)
		psize = mm->lpsize;
	else
		psize = mm->spsize;
	size = pscnv_roundup(size, psize);
	start = pscnv_roundup(start, psize);
	end = pscnv_rounddown(end, psize);
	/* avoid various boundary conditions */
	if (size > (1ull << 60))
		return -EINVAL;
	if (pscnv_mm_debug >= 2)
		NV_INFO(mm->dev, "MM: [%s] Request for size %llx at %llx..%llx flags %d\n", mm->name, size, start, end, flags);
	pscnv_mm_validate(mm, "before mm_alloc");
	while (size) {
		struct pscnv_mm_node *cur;
		ret = pscnv_mm_alloc_single(PSCNV_RB_ROOT(&mm->head), size, flags, start, end, &cur);
		if (ret) {
			while (last) {
				cur = last->prev;
				pscnv_mm_free_node(last, true);
				last = cur;
			}
			/* set res to NULL if allocation fails. Do not return
			 * pointers to mm_nodes that belong to someone else */
			*res = NULL;
			return ret;
		}
		if (pscnv_mm_debug >= 1)
			NV_INFO(mm->dev, "MM: [%s] Allocated size %llx at %llx-%llx\n", mm->name, cur->size, cur->start, cur->start+cur->size);
		
		size -= cur->size;
		if (last) {
			/* subsequent iterations */
			last->next = cur;
			cur->prev = last;
			last = cur;
		} else {
			/* first iteration */
			*res = last = cur;
			cur->prev = 0;
		}
	}
	if (last)
		last->next = 0;
	pscnv_mm_validate(mm, "after mm_alloc");
	return 0;
}
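
On success, the request may have been satisfied by several discontiguous regions chained through the next/prev pointers, so a caller has to walk the whole returned list. A minimal sketch of such a caller, where program_range() is a hypothetical stand-in for whatever consumes each region:

static int
alloc_and_program(struct pscnv_mm *mm, uint64_t size, uint64_t start, uint64_t end)
{
	struct pscnv_mm_node *node, *cur;
	int ret;

	ret = pscnv_mm_alloc(mm, size, 0, start, end, &node);
	if (ret)
		return ret;

	/* each node covers one contiguous range [start, start + size) */
	for (cur = node; cur; cur = cur->next)
		program_range(cur->start, cur->size); /* hypothetical helper */

	return 0;
}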
Example 6
int
nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder = NULL;
	struct drm_encoder *encoder;
	bool dum;
	int type;

	NV_DEBUG_KMS(dev, "\n");

	switch (entry->type) {
	case OUTPUT_TMDS:
		NV_INFO(dev, "Detected a TMDS output\n");
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case OUTPUT_LVDS:
		NV_INFO(dev, "Detected a LVDS output\n");
		type = DRM_MODE_ENCODER_LVDS;

		if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
			NV_ERROR(dev, "Failed parsing LVDS table\n");
			return -EINVAL;
		}
		break;
	case OUTPUT_DP:
		NV_INFO(dev, "Detected a DP output\n");
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -EINVAL;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	nv_encoder->or = ffs(entry->or) - 1;

	nv_encoder->disconnect = nv50_sor_disconnect;

	drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
	drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	return 0;
}
Example 7
/* free_node operations that do not actually free reserved memory, but just
 * set up the nodes in the tree, should be silent */
static void pscnv_mm_free_node(struct pscnv_mm_node *node, bool silent) {
	struct pscnv_mm_node *prev = PSCNV_RB_PREV(pscnv_mm_head, entry, node);
	struct pscnv_mm_node *next = PSCNV_RB_NEXT(pscnv_mm_head, entry, node);
	int i;
	if (pscnv_mm_debug >= 2 || (!silent && pscnv_mm_debug >= 1))
		NV_INFO(node->mm->dev, "MM: [%s] Freeing node %llx..%llx of type %d\n", node->mm->name, node->start, node->start + node->size, node->type);
	if (node->next) {
		node->next->prev = NULL;
		node->next = NULL;
	}
	if (pscnv_mm_debug >= 1 && node->prev)
		NV_ERROR(node->mm->dev, "A node that's about to be freed should not have a valid prev pointer!\n");
	node->prev = NULL;
	node->type = PSCNV_MM_TYPE_FREE;
	if (prev->type == PSCNV_MM_TYPE_FREE) {
		if (pscnv_mm_debug >= 2)
			NV_INFO(node->mm->dev, "MM: Merging left with node %llx..%llx\n", prev->start, prev->start + prev->size);
		if (prev->start + prev->size != node->start) {
			NV_ERROR(node->mm->dev, "MM: node %llx..%llx not contiguous with prev %llx..%llx\n",
				 node->start, node->start + node->size, prev->start, prev->start + prev->size);
			pscnv_mm_dump(PSCNV_RB_ROOT(&node->mm->head));
		} else {
			node->start = prev->start;
			node->size += prev->size;
			PSCNV_RB_REMOVE(pscnv_mm_head, &node->mm->head, prev);
			kfree(prev);
		}
	}
	if (next->type == PSCNV_MM_TYPE_FREE) {
		if (pscnv_mm_debug >= 2)
			NV_INFO(node->mm->dev, "MM: Merging right with node %llx..%llx\n", next->start, next->start + next->size);
		if (node->start + node->size != next->start) {
			NV_ERROR(node->mm->dev, "MM: node %llx..%llx not contiguous with next %llx..%llx\n",
				 node->start, node->start + node->size, next->start, next->start + next->size);
			pscnv_mm_dump(PSCNV_RB_ROOT(&node->mm->head));
		} else {
			node->size += next->size;
			PSCNV_RB_REMOVE(pscnv_mm_head, &node->mm->head, next);
			kfree(next);
		}
	}
	for (i = 0; i < GTYPES; i++) {
		uint64_t s, e;
		pscnv_mm_getfree(node, i, &s, &e);
		node->gap[i] = e - s;
	}
	pscnv_mm_augup(node);
}
Example 8
static void
nvc0_memcpy_m2mf(struct pscnv_ib_chan *chan, const uint64_t dst_addr,
		 const uint64_t src_addr, const uint32_t size, int flags)
{
	/* MODE 1 means fire fence */
	static const uint32_t mode1 = 0x102110; /* QUERY_SHORT|QUERY_YES|SRC_LINEAR|DST_LINEAR */
	static const uint32_t mode2 = 0x100110; /* QUERY_SHORT|SRC_LINEAR|DST_LINEAR */
	static const uint32_t page_size = PSCNV_MEM_PAGE_SIZE;
	const uint32_t page_count = size / page_size;
	const uint32_t rem_size = size - page_size * page_count;
	
	uint64_t dst_pos = dst_addr;
	uint64_t src_pos = src_addr;
	uint32_t pages_left = page_count;
	
	if (flags & PSCNV_DMA_VERBOSE) {
		char size_str[16];
		pscnv_mem_human_readable(size_str, size);
		NV_INFO(chan->dev, "DMA: M2MF- copy %s from %llx to %llx\n",
			size_str, src_addr, dst_addr);
	}

	while (pages_left) {
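		/* each M2MF exec below transfers at most 2047 page-sized lines */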
		int line_count = (pages_left > 2047) ? 2047 : pages_left;
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x238, 2);
		OUT_RING(chan, dst_pos >> 32); /* OFFSET_OUT_HIGH */
		OUT_RING(chan, dst_pos); /* OFFSET_OUT_LOW */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32); /* OFFSET_IN_HIGH */
		OUT_RING(chan, src_pos); /* OFFSET_IN_LOW */
		OUT_RING(chan, page_size); /* SRC_PITCH_IN */
		OUT_RING(chan, page_size); /* DST_PITCH_IN */
		OUT_RING(chan, page_size); /* LINE_LENGTH_IN */
		OUT_RING(chan, line_count); /* LINE_COUNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x300, 1);
		if (pages_left == line_count && rem_size == 0)
			OUT_RING(chan, mode1); /* EXEC */
		else
			OUT_RING(chan, mode2); /* EXEC */
		pages_left -= line_count;
		dst_pos += (page_size * line_count);
		src_pos += (page_size * line_count);
	}
	if (rem_size) {
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x238, 2);
		OUT_RING(chan, dst_pos >> 32); /* OFFSET_OUT_HIGH */
		OUT_RING(chan, dst_pos); /* OFFSET_OUT_LOW */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32); /* OFFSET_IN_HIGH */
		OUT_RING(chan, src_pos); /* OFFSET_IN_LOW */
		OUT_RING(chan, rem_size); /* SRC_PITCH_IN */
		OUT_RING(chan, rem_size); /* DST_PITCH_IN */
		OUT_RING(chan, rem_size); /* LINE_LENGTH_IN */
		OUT_RING(chan, 1); /* LINE_COUNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x300, 1);
		OUT_RING(chan, mode1); /* EXEC */
	}

	FIRE_RING(chan);
}
Example 9
int
pscnv_mem_free(struct pscnv_bo *bo)
{
	struct drm_nouveau_private *dev_priv = bo->dev->dev_private;
	if (pscnv_mem_debug >= 1)
		NV_INFO(bo->dev, "Freeing %d, %#llx-byte %sBO(%p) of type %08x, tile_flags %x\n", bo->serial, bo->size,
				(bo->flags & PSCNV_GEM_CONTIG ? "contig " : ""), bo, bo->cookie, bo->tile_flags);

	if (dev_priv->vm_ok && bo->map1)
		pscnv_vspace_unmap_node(bo->map1);
	if (dev_priv->vm_ok && bo->map3)
		pscnv_vspace_unmap_node(bo->map3);
	switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
		case PSCNV_GEM_VRAM_SMALL:
		case PSCNV_GEM_VRAM_LARGE:
			dev_priv->vram->free(bo);
			break;
		case PSCNV_GEM_SYSRAM_SNOOP:
		case PSCNV_GEM_SYSRAM_NOSNOOP:
			pscnv_sysram_free(bo);
			break;
	}
	kfree (bo);
	return 0;
}
Example 10
struct pscnv_vspace *
pscnv_vspace_new (struct drm_device *dev, uint64_t size, uint32_t flags, int fake) {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_vspace *res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		NV_ERROR(dev, "VM: Couldn't alloc vspace\n");
		return 0;
	}
	res->dev = dev;
	res->size = size;
	res->flags = flags;
	kref_init(&res->ref);
	mutex_init(&res->lock);
	if (pscnv_vspace_bind(res, fake)) {
		kfree(res);
		return 0;
	}
	NV_INFO(dev, "VM: Allocating vspace %d\n", res->vid);
	if (dev_priv->vm->do_vspace_new(res)) {
		pscnv_vspace_unbind(res);
		kfree(res);
		return 0;
	}
	return res;
}
Example 11
/* return pointer to swaptask that received the chunk */
static struct pscnv_swaptask *
pscnv_swaptask_add_chunk_unlocked(struct list_head *swaptasks, struct pscnv_chunk *cnk)
{
	struct drm_device *dev = cnk->bo->dev;
	struct pscnv_client *tgt = cnk->bo->client;
	struct pscnv_swaptask *st;
	
	BUG_ON(!tgt);
	
	st = pscnv_swaptask_get(swaptasks, tgt);
	if (!st) {
		return NULL;
	}
	BUG_ON(st->tgt != tgt);
	
	if (pscnv_swapping_debug >= 3) {
		NV_INFO(dev, "pscnv_swaptask_add_chunk %08x/%d-%u for tgt %d to "
				"swaptask %d\n",
			cnk->bo->cookie, cnk->bo->serial, cnk->idx, tgt->pid,
			st->serial);
	}
	
	pscnv_chunk_list_add_unlocked(&st->selected, cnk);
	
	return st;
}
Example 12
static int
nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_level *perflvl = NULL;

	/* safety precaution, for now */
	if (nouveau_perflvl_wr != 7777)
		return -EPERM;

	if (!strncmp(profile, "boot", 4))
		perflvl = &pm->boot;
	else {
		int pl = simple_strtol(profile, NULL, 10);
		int i;

		for (i = 0; i < pm->nr_perflvl; i++) {
			if (pm->perflvl[i].id == pl) {
				perflvl = &pm->perflvl[i];
				break;
			}
		}

		if (!perflvl)
			return -EINVAL;
	}

	NV_INFO(dev, "setting performance level: %s\n", profile);
	return nouveau_pm_perflvl_set(dev, perflvl);
}
Example 13
int
pscnv_vspace_map(struct pscnv_vspace *vs, struct pscnv_bo *bo,
		uint64_t start, uint64_t end, int back,
		struct pscnv_mm_node **res)
{
	struct pscnv_mm_node *node;
	int ret;
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	mutex_lock(&vs->lock);
	ret = dev_priv->vm->place_map(vs, bo, start, end, back, &node);
	if (ret) {
		mutex_unlock(&vs->lock);
		return ret;
	}
	node->tag = bo;
	node->tag2 = vs;
	if (pscnv_vm_debug >= 1)
		NV_INFO(vs->dev, "VM: vspace %d: Mapping BO %x/%d at %llx-%llx.\n", vs->vid, bo->cookie, bo->serial, node->start,
				node->start + node->size);
	ret = dev_priv->vm->do_map(vs, bo, node->start);
	if (ret) {
		/* the node was handed back to the allocator; don't return a stale pointer */
		pscnv_vspace_unmap_node_unlocked(node);
		node = NULL;
	}
	*res = node;
	mutex_unlock(&vs->lock);
	return ret;
}
Example 14
/* called once on driver load */
int
pscnv_swapping_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_swapping *swapping;
	
	if (pscnv_swapping_debug >= 1) {
		NV_INFO(dev, "pscnv_swapping: initalizing....\n");
	}
	
	dev_priv->swapping = kzalloc(sizeof(struct pscnv_swapping), GFP_KERNEL);
	if (!dev_priv->swapping) {
		NV_ERROR(dev, "Out of memory\n");
		return -ENOMEM;
	}
	swapping = dev_priv->swapping;
	
	swapping->dev = dev;
	atomic_set(&swapping->swaptask_serial, 0);
	init_completion(&swapping->next_swap);
	
	INIT_DELAYED_WORK(&swapping->increase_vram_work, increase_vram_work_func);
	/* schedule_delayed_work() returns a bool (whether the work was newly queued),
	 * not an errno, so do not pass its return value up to the caller */
	schedule_delayed_work(&swapping->increase_vram_work, PSCNV_INCREASE_RATE);

	return 0;
}
Example 15
int
nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;

	NV_DEBUG(dev, "\n");
	NV_INFO(dev, "Detected a DAC output\n");

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	nv_encoder->or = ffs(entry->or) - 1;

	nv_encoder->disconnect = nv50_dac_disconnect;

	drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
			 DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;
	return 0;
}
Example 16
uint32_t pscnv_ramht_find(struct pscnv_ramht *ramht, uint32_t handle) {
	/* XXX: do this properly. */
	uint32_t hash = pscnv_ramht_hash(ramht, handle);
	uint32_t start = hash * 8;
	uint32_t pos = start;
	uint32_t res;
	if (pscnv_ramht_debug >= 2)
		NV_INFO(ramht->vo->dev, "Handle %x hash %x\n", handle, hash);
	spin_lock (&ramht->lock);
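	/* probe forward from the hashed slot; an empty context word means the handle is absent */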
	do {
		if (!nv_rv32(ramht->vo, ramht->offset + pos + 4))
			break;
		if (nv_rv32(ramht->vo, ramht->offset + pos) == handle) {
			res = nv_rv32(ramht->vo, ramht->offset + pos + 4);
			spin_unlock (&ramht->lock);
			return res;
		} 
		pos += 8;
		if (pos == 8 << ramht->bits)
			pos = 0;
	} while (pos != start);
	spin_unlock (&ramht->lock);
	NV_ERROR(ramht->vo->dev, "RAMHT object %x not found\n", handle);
	return 0;
}
Example 17
struct pscnv_swaptask *
pscnv_swaptask_new(struct pscnv_client *tgt)
{
	struct drm_device *dev = tgt->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_swaptask *st;
	int serial;
	
	st = kzalloc(sizeof(struct pscnv_swaptask), GFP_KERNEL);
	
	if (!st) {
		NV_ERROR(dev, "pscnv_swaptask_new: out of memory\n");
		return NULL;
	}
	
	serial = atomic_inc_return(&dev_priv->swapping->swaptask_serial);
	
	if (pscnv_swapping_debug >= 3) {
		NV_INFO(dev, "pscnv_swaptask_new: new swaptask %d for client %d\n",
			serial, tgt->pid);
	}
	
	INIT_LIST_HEAD(&st->list);
	pscnv_chunk_list_init(&st->selected);
	st->tgt = tgt;
	st->dev = dev;
	st->serial = serial;
	init_completion(&st->completion);
	
	return st;
}
Example 18
int
nouveau_pm_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	char info[256];
	int ret, i;

	nouveau_mem_timing_init(dev);
	nouveau_volt_init(dev);
	nouveau_perf_init(dev);
	nouveau_temp_init(dev);

	NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
	for (i = 0; i < pm->nr_perflvl; i++) {
		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
		NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
	}

	/* determine current ("boot") performance level */
	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
	if (ret == 0) {
		strncpy(pm->boot.name, "boot", 4);
		pm->cur = &pm->boot;

		nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
		NV_INFO(dev, "c:%s", info);
	}

	/* switch performance levels now if requested */
	if (nouveau_perflvl != NULL) {
		ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
		if (ret) {
			NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
				 nouveau_perflvl, ret);
		}
	}

	nouveau_sysfs_init(dev);
	nouveau_hwmon_init(dev);
#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
	register_acpi_notifier(&pm->acpi_nb);
#endif

	return 0;
}
Example 19
struct pscnv_bo *
pscnv_mem_alloc(struct drm_device *dev,
		uint64_t size, int flags, int tile_flags, uint32_t cookie)
{
	static int serial = 0;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_bo *res;
	int ret;
	/* avoid all sorts of integer overflows possible otherwise. */
	if (size >= (1ULL << 40))
		return 0;
	if (!size)
		return 0;

	res = kzalloc (sizeof *res, GFP_KERNEL);
	if (!res)
		return 0;
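	/* round the size up to the PSCNV page size, then to the CPU page size */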
	size = (size + PSCNV_MEM_PAGE_SIZE - 1) & ~(PSCNV_MEM_PAGE_SIZE - 1);
	size = PAGE_ALIGN(size);
	res->dev = dev;
	res->size = size;
	res->flags = flags;
	res->tile_flags = tile_flags;
	res->cookie = cookie;
	res->gem = 0;

	/* XXX: another mutex? */
	mutex_lock(&dev_priv->vram_mutex);
	res->serial = serial++;
	mutex_unlock(&dev_priv->vram_mutex);

	if (pscnv_mem_debug >= 1)
		NV_INFO(dev, "Allocating %d, %#llx-byte %sBO of type %08x, tile_flags %x\n", res->serial, size,
				(flags & PSCNV_GEM_CONTIG ? "contig " : ""), cookie, tile_flags);
	switch (res->flags & PSCNV_GEM_MEMTYPE_MASK) {
		case PSCNV_GEM_VRAM_SMALL:
		case PSCNV_GEM_VRAM_LARGE:
			ret = dev_priv->vram->alloc(res);
			break;
		case PSCNV_GEM_SYSRAM_SNOOP:
		case PSCNV_GEM_SYSRAM_NOSNOOP:
			if (dev_priv->vram->sysram_tiling_ok(res))
				ret = pscnv_sysram_alloc(res);
			else
				ret = -EINVAL;
			break;
		default:
			ret = -ENOSYS;
	}
	if (ret) {
		kfree(res);
		return 0;
	}
	return res;
}
Example 20
int
nouveau_pm_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	char info[256];
	int ret, i;

	nouveau_volt_init(dev);
	nouveau_perf_init(dev);
	nouveau_temp_init(dev);
	nouveau_mem_timing_init(dev);

	NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
	for (i = 0; i < pm->nr_perflvl; i++) {
		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
		NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
	}

	/* determine current ("boot") performance level */
	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
	if (ret == 0) {
		pm->cur = &pm->boot;

		nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
		NV_INFO(dev, "c: %s", info);
	}

	/* switch performance levels now if requested */
	if (nouveau_perflvl != NULL) {
		ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
		if (ret) {
			NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
				 nouveau_perflvl, ret);
		}
	}

	nouveau_sysfs_init(dev);
	nouveau_hwmon_init(dev);

	return 0;
}
Example 21
static inline void
nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
{
	u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
	u32 stat = nv_rd32(dev, subp_base + 0x020);

	if (stat) {
		NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
		nv_wr32(dev, subp_base + 0x020, stat);
	}
}
Example 22
static void
nvc0_memcpy_pcopy0(struct pscnv_ib_chan *chan, const uint64_t dst_addr,
		 const uint64_t src_addr, const uint32_t size, int flags)
{
	static const uint32_t mode = 0x3110; /* QUERY_SHORT|QUERY|SRC_LINEAR|DST_LINEAR */
	static const uint32_t pitch = 0x8000;
	const uint32_t ycnt = size / pitch;
	const uint32_t rem_size = size - ycnt * pitch;
	
	uint64_t dst_pos = dst_addr;
	uint64_t src_pos = src_addr;
	
	if (flags & PSCNV_DMA_VERBOSE) {
		char size_str[16];
		pscnv_mem_human_readable(size_str, size);
		NV_INFO(chan->dev, "DMA: PCOPY0- copy %s from %llx to %llx\n",
			size_str, src_addr, dst_addr);
	}

	if (ycnt) {
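		/* bulk copy: ycnt full lines of pitch (32 KiB) bytes each; the tail below handles any remainder */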
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32);  /* SRC_ADDR_HIGH */
		OUT_RING(chan, src_pos);	/* SRC_ADDR_LOW */
		OUT_RING(chan, dst_pos >> 32);  /* DST_ADDR_HIGH */
		OUT_RING(chan, dst_pos);	/* DST_ADDR_LOW */
		OUT_RING(chan, pitch);		/* SRC_PITCH_IN */
		OUT_RING(chan, pitch);		/* DST_PITCH_IN */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x324, 2);
		OUT_RING(chan, pitch);		/* XCNT */
		OUT_RING(chan, ycnt);		/* YCNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x300, 1);
		OUT_RING(chan, mode);		/* EXEC */
		FIRE_RING(chan);
	}
	
	dst_pos += ycnt * pitch;
	src_pos += ycnt * pitch;
	
	if (rem_size) {
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32);  /* SRC_ADDR_HIGH */
		OUT_RING(chan, src_pos);	/* SRC_ADDR_LOW */
		OUT_RING(chan, dst_pos >> 32);  /* DST_ADDR_HIGH */
		OUT_RING(chan, dst_pos);	/* DST_ADDR_LOW */
		OUT_RING(chan, rem_size);	/* SRC_PITCH_IN */
		OUT_RING(chan, rem_size);	/* DST_PITCH_IN */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x324, 2);
		OUT_RING(chan, rem_size);	/* XCNT */
		OUT_RING(chan, 1);		/* YCNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x300, 1);
		OUT_RING(chan, mode);		/* EXEC */
		FIRE_RING(chan);
	}
}
Example 23
/* if we have an OF card, copy vbios to RAMIN */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	int size, i;
	const uint32_t *bios;
	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
	if (!dn) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	bios = of_get_property(dn, "NVDA,BMP", &size);
	if (bios) {
		for (i = 0; i < size; i += 4)
			nv_wi32(dev, i, bios[i/4]);
		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
	} else {
		NV_INFO(dev, "Unable to get the OF bios\n");
	}
#endif
}
Example 24
void pscnv_vspace_ref_free(struct kref *ref) {
	struct pscnv_vspace *vs = container_of(ref, struct pscnv_vspace, ref);
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	NV_INFO(vs->dev, "VM: Freeing vspace %d\n", vs->vid);
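	/* mappings in a vspace with a negative vid hold no GEM references (see
	 * pscnv_vspace_unmap_node_unlocked), so plain pscnv_mm_free is enough there */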
	if (vs->vid < 0)
		pscnv_mm_takedown(vs->mm, pscnv_mm_free);
	else
		pscnv_mm_takedown(vs->mm, pscnv_vspace_free_unmap);
	dev_priv->vm->do_vspace_free(vs);
	pscnv_vspace_unbind(vs);
	kfree(vs);
}
Example 25
void
pscnv_swaptask_free(struct pscnv_swaptask *st)
{
	struct drm_device *dev = st->dev;
	
	if (pscnv_swapping_debug >= 3) {
		NV_INFO(dev, "pscnv_swaptask_free: free swaptask %d for client %d\n",
			st->serial, st->tgt->pid);
	}
	
	pscnv_chunk_list_free(&st->selected);
	kfree(st);
}
Example 26
int
nouveau_irq_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
		ret = pci_enable_msi(dev->pdev);
		if (ret == 0) {
			NV_INFO(dev, "enabled MSI\n");
			dev_priv->msi_enabled = true;
		}
	}

	return drm_irq_install(dev);
}
Example 27
static int
pscnv_vspace_unmap_node_unlocked(struct pscnv_mm_node *node) {
	struct pscnv_vspace *vs = node->tag2;
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	struct pscnv_bo *bo = node->tag;
	if (pscnv_vm_debug >= 1) {
		NV_INFO(vs->dev, "VM: vspace %d: Unmapping range %llx-%llx.\n", vs->vid, node->start, node->start + node->size);
	}
	dev_priv->vm->do_unmap(vs, node->start, node->size);

	if (vs->vid >= 0) {
		drm_gem_object_unreference(bo->gem);
	}
	pscnv_mm_free(node);
	return 0;
}
Example 28
void pscnv_mm_takedown(struct pscnv_mm *mm, void (*free_callback)(struct pscnv_mm_node *)) {
	struct pscnv_mm_node *cur;
	pscnv_mm_validate(mm, "before mm_takedown");
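	/* repeatedly locate the first still-allocated node, rewind to the head of its
	 * allocation chain, hand it to the free callback, and restart until only
	 * free nodes and the sentinel remain */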
restart:
	cur = PSCNV_RB_MIN(pscnv_mm_head, &mm->head);
	cur = PSCNV_RB_NEXT(pscnv_mm_head, entry, cur);
	while (cur->type == PSCNV_MM_TYPE_FREE)
		cur = PSCNV_RB_NEXT(pscnv_mm_head, entry, cur);
	if (!cur->sentinel) {
		while (cur->prev)
			cur = cur->prev;
		if (pscnv_mm_debug >= 1)
			NV_INFO (mm->dev, "MM: [%s] takedown free %llx..%llx type %d\n", mm->name, cur->start, cur->start + cur->size, cur->type);
		free_callback(cur);
		goto restart;
	}
	while ((cur = PSCNV_RB_ROOT(&mm->head))) {
		PSCNV_RB_REMOVE(pscnv_mm_head, &mm->head, cur);
		kfree(cur);
	}
	kfree(mm);
}
Example 29
int
nvc0_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;
	uint32_t ctrlr_num, ctrlr_amt;

	dev_priv->vram = kzalloc (sizeof *dev_priv->vram, GFP_KERNEL);
	if (!dev_priv->vram) {
		NV_ERROR(dev, "VRAM: out ot memory\n");
		return -ENOMEM;
	}

	dev_priv->vram->alloc = nvc0_vram_alloc;
	dev_priv->vram->free = pscnv_vram_free;
	dev_priv->vram->takedown = pscnv_vram_takedown;

	ctrlr_num = nv_rd32(dev, NVC0_MEM_CTRLR_COUNT);
	ctrlr_amt = nv_rd32(dev, NVC0_MEM_CTRLR_RAM_AMOUNT);

	dev_priv->vram_size = ctrlr_num * (ctrlr_amt << 20);

	if (!dev_priv->vram_size) {
		NV_ERROR(dev, "No VRAM detected, aborting.\n");
		return -ENODEV;
	}

	NV_INFO(dev, "VRAM: size 0x%llx, %d controllers\n",
			dev_priv->vram_size, ctrlr_num);

	ret = pscnv_mm_init(dev, 0x40000, dev_priv->vram_size - 0x20000, 0x1000, 0x20000, 0x1000, &dev_priv->vram_mm);
	if (ret) {
		kfree(dev_priv->vram);
		return ret;
	}

	return 0;
}
Example 30
void
pscnv_dma_exit(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_dma *dma = dev_priv->dma;

	if (!dma) {
		NV_WARN(dev, "DMA: pscnv_dma_exit() called but DMA was never initialized!\n");
		return;
	}

	NV_INFO(dev, "DMA: Exiting...\n");

	/* No need to undo pscnv_ib_init_subch(): it only configures subchannels
	 * on a channel we are about to close anyway. Likewise, pscnv_ib_add_fence()
	 * needs no undo, as pscnv_ib_chan_kill() (called from pscnv_ib_chan_free())
	 * takes care of it. */
	pscnv_ib_chan_free(dma->ib_chan);
	pscnv_vspace_unref(dma->vs);
	/* pscnv_vspace_new() needs no explicit undo either: pscnv_vspace_unref()
	 * does the freeing, unless we hold one reference too many. */
	mutex_destroy(&dma->lock);

	kfree(dma);
	dev_priv->dma = NULL;
}