static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t *agp = data; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) return -EINVAL; block = mmAllocMem(dev_priv->AGPHeap, agp->size, 0, 0); if (block) { /* TODO */ agp->offset = block->ofs; agp->free = (unsigned long)block; if (!add_alloc_set(agp->context, AGP_TYPE, agp->free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock) agp->free); retval = -1; } } else { agp->offset = 0; agp->size = 0; agp->free = 0; } DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp->size, agp->offset); return retval; }
/**
 * Allocate executable memory from the shared exec heap.
 *
 * The requested size is rounded up to a 32-byte multiple and the block
 * is allocated with 32-byte alignment.  Returns a pointer into the
 * exec_mem region, or NULL if the heap could not be initialized or is
 * exhausted.  Serialized by exec_mutex.
 */
void *
_mesa_exec_malloc(GLuint size)
{
   void *addr = NULL;

   _glthread_LOCK_MUTEX(exec_mutex);

   if (init_heap()) {
      struct mem_block *block = NULL;

      if (exec_heap) {
         /* Round up to a 32-byte multiple; alignment argument is 2^5. */
         size = (size + 31) & ~31;
         block = mmAllocMem(exec_heap, size, 32, 0);
      }

      if (block)
         addr = exec_mem + block->ofs;
      else
         printf("_mesa_exec_malloc failed\n");
   }

   _glthread_UNLOCK_MUTEX(exec_mutex);

   return addr;
}
static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t *fb = data; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->FBHeap == NULL) return -EINVAL; block = mmAllocMem(dev_priv->FBHeap, fb->size, 0, 0); if (block) { /* TODO */ fb->offset = block->ofs; fb->free = (unsigned long)block; if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock) fb->free); retval = -EINVAL; } } else { fb->offset = 0; fb->size = 0; fb->free = 0; } DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb->size, fb->offset); return retval; }
int sis_ioctl_agp_alloc( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t agp; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp)); block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0); if (block) { /* TODO */ agp.offset = block->ofs; agp.free = (unsigned long)block; if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock)agp.free); retval = -1; } } else { agp.offset = 0; agp.size = 0; agp.free = 0; } DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, agp, sizeof(agp)); DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset); return retval; }
int sis_fb_alloc( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t fb; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->FBHeap == NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb)); block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0); if (block) { /* TODO */ fb.offset = block->ofs; fb.free = (unsigned long)block; if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock)fb.free); retval = DRM_ERR(EINVAL); } } else { fb.offset = 0; fb.size = 0; fb.free = 0; } DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, fb, sizeof(fb)); DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset); return retval; }
int sisp_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_sis_mem_t agp; PMemBlock block; int retval = 0; if(!AgpHeap) return -1; if (copy_from_user(&agp, (drm_sis_mem_t *)arg, sizeof(agp))) return -EFAULT; block = mmAllocMem(AgpHeap, agp.size, 0, 0); if(block){ /* TODO */ agp.offset = block->ofs; agp.free = (unsigned long)block; if(!add_alloc_set(agp.context, AGP_TYPE, agp.free)){ DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock)agp.free); retval = -1; } } else{ agp.offset = 0; agp.size = 0; agp.free = 0; } if (copy_to_user((drm_sis_mem_t *)arg, &agp, sizeof(agp))) return -EFAULT; DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset); return retval; }
/**
 * \brief Upload texture images.
 *
 * This might require removing our own and/or other client's texture objects to
 * make room for these images.
 *
 * \param rmesa Radeon context.
 * \param tObj texture object to upload.
 *
 * Sets the matching hardware texture format. Calculates which mipmap levels to
 * send, depending of the base image size, GL_TEXTURE_MIN_LOD,
 * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL and the
 * Radeon offset rules. Kicks out textures until the requested texture fits,
 * sets the texture hardware state and, while holding the hardware lock,
 * uploads any images that are new.
 */
static void radeonSetTexImages( radeonContextPtr rmesa,
				struct gl_texture_object *tObj )
{
   radeonTexObjPtr t = (radeonTexObjPtr)tObj->DriverData;
   const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
   GLint totalSize;
   GLint texelsPerDword = 0, blitWidth = 0, blitPitch = 0;
   GLint x, y, width, height;
   GLint i;
   GLint firstLevel, lastLevel, numLevels;
   GLint log2Width, log2Height;
   GLuint txformat = 0;

   /* This code cannot be reached once we have lost focus */
   assert(rmesa->radeonScreen->buffers);

   /* Set the hardware texture format.  blitPitch is the minimum blit
    * pitch in texels for this format (64 bytes' worth of texels). */
   switch (baseImage->TexFormat->MesaFormat) {
   case MESA_FORMAT_I8:
      txformat = RADEON_TXFORMAT_I8;
      texelsPerDword = 4;
      blitPitch = 64;
      break;
   case MESA_FORMAT_RGBA8888:
      txformat = RADEON_TXFORMAT_RGBA8888 | RADEON_TXFORMAT_ALPHA_IN_MAP;
      texelsPerDword = 1;
      blitPitch = 16;
      break;
   case MESA_FORMAT_RGB565:
      txformat = RADEON_TXFORMAT_RGB565;
      texelsPerDword = 2;
      blitPitch = 32;
      break;
   default:
      _mesa_problem(NULL, "unexpected texture format in radeonTexImage2D");
      return;
   }

   t->pp_txformat &= ~(RADEON_TXFORMAT_FORMAT_MASK |
		       RADEON_TXFORMAT_ALPHA_IN_MAP);
   t->pp_txformat |= txformat;

   /* Select the larger of the two widths for our global texture image
    * coordinate space.  As the Radeon has very strict offset rules, we
    * can't upload mipmaps directly and have to reference their location
    * from the aligned start of the whole image.
    */
   blitWidth = MAX2( baseImage->Width, blitPitch );

   /* Calculate mipmap offsets and dimensions.
    */
   totalSize = 0;
   x = 0;
   y = 0;

   /* Compute which mipmap levels we really want to send to the hardware.
    * This depends on the base image size, GL_TEXTURE_MIN_LOD,
    * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
    * Yes, this looks overly complicated, but it's all needed.
    */
   firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
   firstLevel = MAX2(firstLevel, tObj->BaseLevel);
   lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
   lastLevel = MAX2(lastLevel, tObj->BaseLevel);
   lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
   lastLevel = MIN2(lastLevel, tObj->MaxLevel);
   lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */

   /* save these values */
   t->firstLevel = firstLevel;
   t->lastLevel = lastLevel;

   numLevels = lastLevel - firstLevel + 1;

   log2Width = tObj->Image[0][firstLevel]->WidthLog2;
   log2Height = tObj->Image[0][firstLevel]->HeightLog2;

   /* Lay out each mipmap level inside one packed rectangular region,
    * accumulating the total byte size as we go. */
   for ( i = 0 ; i < numLevels ; i++ ) {
      const struct gl_texture_image *texImage = tObj->Image[0][i + firstLevel];
      if ( !texImage )
	 break;

      width = texImage->Width;
      height = texImage->Height;

      /* Texture images have a minimum pitch of 32 bytes (half of the
       * 64-byte minimum pitch for blits).  For images that have a
       * width smaller than this, we must pad each texture image
       * scanline out to this amount.
       */
      if ( width < blitPitch / 2 ) {
	 width = blitPitch / 2;
      }

      totalSize += width * height * baseImage->TexFormat->TexelBytes;
      ASSERT( (totalSize & 31) == 0 );

      /* Widen narrow levels (halving height) until they span the blit
       * width, so rows of small mipmaps can be packed side by side. */
      while ( width < blitWidth && height > 1 ) {
	 width *= 2;
	 height /= 2;
      }

      ASSERT(i < RADEON_MAX_TEXTURE_LEVELS);
      t->image[i].x = x;
      t->image[i].y = y;
      t->image[i].width = width;
      t->image[i].height = height;

      /* While blits must have a pitch of at least 64 bytes, mipmaps
       * must be aligned on a 32-byte boundary (just like each texture
       * image scanline).
       */
      if ( width >= blitWidth ) {
	 y += height;
      } else {
	 x += width;
	 if ( x >= blitWidth ) {
	    x = 0;
	    y++;
	 }
      }
   }

   /* Align the total size of texture memory block.
    */
   t->totalSize = (totalSize + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

   /* Hardware state:
    */
   t->pp_txfilter &= ~RADEON_MAX_MIP_LEVEL_MASK;
   t->pp_txfilter |= (numLevels - 1) << RADEON_MAX_MIP_LEVEL_SHIFT;

   t->pp_txformat &= ~(RADEON_TXFORMAT_WIDTH_MASK |
		       RADEON_TXFORMAT_HEIGHT_MASK);
   t->pp_txformat |= ((log2Width << RADEON_TXFORMAT_WIDTH_SHIFT) |
		      (log2Height << RADEON_TXFORMAT_HEIGHT_SHIFT));

   t->dirty_state = TEX_ALL;

   /* Update the local texture LRU.
    */
   move_to_head( &rmesa->texture.objects[0], t );

   LOCK_HARDWARE( rmesa );

   /* Kick out textures until the requested texture fits.  The LRU tail
    * (objects[0].prev) is evicted each round until mmAllocMem succeeds.
    * NOTE(review): the alignment argument 12 presumably means 2^12 =
    * 4KB alignment — confirm against mmAllocMem's contract. */
   while ( !t->memBlock ) {
      t->memBlock = mmAllocMem( rmesa->texture.heap[0], t->totalSize, 12, 0);
      if (!t->memBlock)
	 radeonSwapOutTexObj( rmesa, rmesa->texture.objects[0].prev );
   }

   /* Set the base offset of the texture image */
   t->bufAddr = rmesa->radeonScreen->texOffset[0] + t->memBlock->ofs;
   t->pp_txoffset = t->bufAddr;

   /* Upload any images that are new */
   for ( i = 0 ; i < numLevels ; i++ ) {
      if ( t->dirty_images & (1 << i) ) {
	 radeonUploadSubImage( rmesa, t, i, 0, 0,
			       t->image[i].width, t->image[i].height );
      }
   }

   /* Stamp this texture with the current global texture age so other
    * clients can tell it was touched. */
   rmesa->texture.age[0] = ++rmesa->sarea->texAge[0];

   UNLOCK_HARDWARE( rmesa );

   t->dirty_images = 0;
}