int nouveau_gem_ioctl_new(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_gem_new *req = data; struct nouveau_bo *nvbo = NULL; int ret = 0; if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); return -EINVAL; } ret = nouveau_gem_new(dev, req->info.size, req->align, req->info.domain, req->info.tile_mode, req->info.tile_flags, &nvbo); if (ret) return ret; ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); if (ret == 0) { ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); if (ret) drm_gem_handle_delete(file_priv, req->info.handle); } drm_gem_object_unreference_unlocked(nvbo->gem); return ret; }
int nouveau_gem_ioctl_new(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct nouveau_cli *cli = nouveau_cli(file_priv); struct drm_nouveau_gem_new *req = data; struct nouveau_bo *nvbo = NULL; int ret = 0; ret = nouveau_gem_new(cli, req->info.size, req->align, req->info.domain, req->info.tile_mode, req->info.tile_flags, &nvbo); if (ret) return ret; ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle); if (ret == 0) { ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); if (ret) drm_gem_handle_delete(file_priv, req->info.handle); } /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(&nvbo->gem); return ret; }
int nouveau_gem_ioctl_new(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_fb *pfb = nouveau_fb(drm->device); struct drm_nouveau_gem_new *req = data; struct nouveau_bo *nvbo = NULL; int ret = 0; drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping; if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags); return -EINVAL; } ret = nouveau_gem_new(dev, req->info.size, req->align, req->info.domain, req->info.tile_mode, req->info.tile_flags, &nvbo); if (ret) return ret; ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); if (ret == 0) { ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); if (ret) drm_gem_handle_delete(file_priv, req->info.handle); } /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(nvbo->gem); return ret; }
/*
 * One-off hack: carve out a "shared framebuffer" GEM object covering half
 * of the CPU-visible VRAM and pin it there.  Idempotent - the object is
 * reused once created.  Returns 0 on success or a negative errno.
 */
static int sfbhack_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo = NULL;
	/* NV50 scanout wants a tiled memtype (0x7000); older cards use linear */
	uint32_t tile_flags = dev_priv->card_type == NV_50 ? 0x7000 : 0x0000;
	int ret, size;

	if (dev_priv->sfb_gem)
		return 0;	/* already set up */

	/* clamp to the CPU-visible aperture (BAR 1), then take half of it.
	 * NOTE(review): 'size' is a plain int - a VRAM amount >= 2GB would
	 * overflow before the clamp; confirm nouveau_mem_fb_amount()'s range. */
	size = nouveau_mem_fb_amount(dev);
	if (size > drm_get_resource_len(dev, 1))
		size = drm_get_resource_len(dev, 1);
	size >>= 1;

	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, tile_flags, false, true, &nvbo);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		/* NOTE(review): only the bo reference is dropped here;
		 * presumably this also releases the GEM object created above -
		 * confirm against nouveau_gem_new()'s ownership rules. */
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* keep the gem object; dev_priv->sfb_gem now owns the reference */
	dev_priv->sfb_gem = nvbo->gem;
	return 0;
}
int nouveau_gem_ioctl_new(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_gem_new *req = data; struct nouveau_bo *nvbo = NULL; struct nouveau_channel *chan = NULL; uint32_t flags = 0; int ret = 0; if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) flags |= TTM_PL_FLAG_VRAM; if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) flags |= TTM_PL_FLAG_TT; if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) flags |= TTM_PL_FLAG_SYSTEM; if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); return -EINVAL; } if (req->channel_hint) { chan = nouveau_channel_get(dev, file_priv, req->channel_hint); if (IS_ERR(chan)) return PTR_ERR(chan); } ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, req->info.tile_mode, req->info.tile_flags, false, (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), &nvbo); if (chan) nouveau_channel_put(&chan); if (ret) return ret; ret = nouveau_gem_info(nvbo->gem, &req->info); if (ret) goto out; ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(nvbo->gem); out: return ret; }
/*
 * GEM_NEW ioctl: translate the requested domains into TTM placement flags,
 * allocate a buffer object (optionally near the hinted channel) and hand a
 * handle plus object info back to userspace.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	/* lazily hook TTM up to the DRM device's address space */
	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	/* optional channel hint so the bo can be allocated near its user */
	if (req->channel_hint) {
		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
						     file_priv, chan);
	}

	/* translate GEM domain flags into TTM placement flags */
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	/* no (or CPU) domain requested: allow plain system memory */
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
		return -EINVAL;

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
	/* NOTE(review): this drops a handle count even when
	 * drm_gem_handle_create() was never reached or failed; presumably the
	 * alloc-time handlecount of 1 makes this balanced on all paths -
	 * confirm against drm_gem_object_alloc()/drm_gem_handle_create()
	 * semantics for this kernel era. */
	drm_gem_object_handle_unreference_unlocked(nvbo->gem);

	/* on failure, also drop the allocation's object reference */
	if (ret)
		drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
int nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_bo *ntfy = NULL; uint32_t flags; int ret; if (nouveau_vram_notify) flags = TTM_PL_FLAG_VRAM; else flags = TTM_PL_FLAG_TT; ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0x0000, false, true, &ntfy); if (ret) return ret; ret = nouveau_bo_pin(ntfy, flags); if (ret) goto out_err; ret = nouveau_bo_map(ntfy); if (ret) goto out_err; ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size); if (ret) goto out_err; chan->notifier_bo = ntfy; out_err: if (ret) { mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(ntfy->gem); mutex_unlock(&dev->struct_mutex); } return ret; }
int nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_bo *ntfy = NULL; uint32_t flags, ttmpl; int ret; if (nouveau_vram_notify) { flags = NOUVEAU_GEM_DOMAIN_VRAM; ttmpl = TTM_PL_FLAG_VRAM; } else { flags = NOUVEAU_GEM_DOMAIN_GART; ttmpl = TTM_PL_FLAG_TT; } ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); if (ret) return ret; ret = nouveau_bo_pin(ntfy, ttmpl); if (ret) goto out_err; ret = nouveau_bo_map(ntfy); if (ret) goto out_err; ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); if (ret) goto out_err; chan->notifier_bo = ntfy; out_err: if (ret) drm_gem_object_unreference_unlocked(ntfy->gem); return ret; }
/*
 * Build the fbcon (kernel console) framebuffer: allocate a VRAM buffer
 * object sized for the requested surface, pin and CPU-map it, wrap it in a
 * DRM framebuffer plus an fb_info, and (unless nouveau_nofbaccel) hook up
 * hardware-accelerated console drawing via the kernel channel.
 * Returns 0 on success or a negative errno.
 */
static int
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd mode_cmd;
	struct pci_dev *pdev = dev->pdev;
	struct device *device = &pdev->dev;
	int size, ret;

	/* pitch = width * bytes-per-pixel, padded to 256 bytes for scanout */
	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	/* back the console with a pinned, CPU-mapped VRAM buffer */
	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0,
			      0x0000, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	/* NV50+ accel needs the fb mapped into the channel's VM; failure
	 * just disables accel rather than failing the whole console */
	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
	if (chan && dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(nvbo, chan->vm,
					 &nfbdev->nouveau_fb.vma);
		if (ret) {
			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	mutex_lock(&dev->struct_mutex);

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = nfbdev;

	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);

	nouveau_fb = &nfbdev->nouveau_fb;
	fb = &nouveau_fb->base;

	/* setup helper */
	nfbdev->helper.fb = fb;
	nfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
	/* start with software drawing ops; accel ops swapped in below */
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.mem.bus.base +
			       nvbo->bo.mem.bus.offset;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* Set aperture base/size for vesafb takeover */
	info->apertures = dev_priv->apertures;
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	mutex_unlock(&dev->struct_mutex);

	/* pick the accel backend for this card generation, if any */
	if (dev_priv->channel && !nouveau_nofbaccel) {
		ret = -ENODEV;
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_accel_init(info);
		else if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_accel_init(info);
		else
			ret = nvc0_fbcon_accel_init(info);

		if (ret == 0)
			info->fbops = &nouveau_fbcon_ops;
	}

	nouveau_fbcon_zfill(dev, nfbdev);

	/* To allow resizing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
		nouveau_fb->base.width, nouveau_fb->base.height,
		nvbo->bo.offset, nvbo);

	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	/* NOTE(review): this path returns without unmapping/unpinning nvbo,
	 * without unreferencing its GEM object, and without freeing a
	 * successfully allocated 'info' when the later cmap/apertures steps
	 * fail - looks like a leak on these rare error paths; confirm
	 * against the teardown in the matching fbcon destroy routine. */
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}