Example #1
0
/*
 * kgsl_iommu_setup_defaultpagetable - Set up the initial default pagetable
 * for the iommu. This function is only called once during the first start;
 * successive starts do not call it.
 * @mmu - Pointer to mmu structure
 *
 * Create the initial default pagetable and set up the iommu mappings to it.
 * Return - 0 on success, else error code
 */
static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
{
	int status = 0;
	int i = 0;
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_pagetable *pagetable = NULL;

	/* If the chip is not an 8960, use the second context bank for
	 * pagetable switching on the 3D side; a separate pagetable is
	 * allocated for it */
	if (!cpu_is_msm8960() && msm_soc_version_supports_iommu_v1()) {
		mmu->priv_bank_table =
			kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
		if (mmu->priv_bank_table == NULL) {
			status = -ENOMEM;
			goto err;
		}
	}
	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL) {
		status = -ENOMEM;
		goto err;
	}
	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
				mmu->defaultpagetable;
	/* Map the IOMMU registers only to the default pagetable */
	if (msm_soc_version_supports_iommu_v1()) {
		for (i = 0; i < iommu->unit_count; i++) {
			iommu->iommu_units[i].reg_map.priv |=
						KGSL_MEMFLAGS_GLOBAL;
			status = kgsl_mmu_map(pagetable,
				&(iommu->iommu_units[i].reg_map),
				GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
			if (status) {
				iommu->iommu_units[i].reg_map.priv &=
							~KGSL_MEMFLAGS_GLOBAL;
				goto err;
			}
		}
	}
	return status;
err:
	for (i--; i >= 0; i--) {
		kgsl_mmu_unmap(pagetable,
				&(iommu->iommu_units[i].reg_map));
		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
	}
	if (mmu->priv_bank_table) {
		kgsl_mmu_putpagetable(mmu->priv_bank_table);
		mmu->priv_bank_table = NULL;
	}
	if (mmu->defaultpagetable) {
		kgsl_mmu_putpagetable(mmu->defaultpagetable);
		mmu->defaultpagetable = NULL;
	}
	return status;
}
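
The err: path above relies on a common unwind idiom: i holds the index of the unit whose mapping failed, so the loop "for (i--; i >= 0; i--)" walks back over only the units that were actually mapped and undoes them in reverse order. A minimal standalone sketch of that idiom, with illustrative map/unmap stand-ins that are not part of the driver:

#include <stdio.h>

#define UNIT_COUNT 4

/* Pretend that mapping unit 2 fails, so the unwind path is exercised */
static int map_unit(int i)
{
	printf("map unit %d\n", i);
	return (i == 2) ? -1 : 0;
}

static void unmap_unit(int i)
{
	printf("unmap unit %d\n", i);
}

static int setup_units(void)
{
	int status = 0;
	int i;

	for (i = 0; i < UNIT_COUNT; i++) {
		status = map_unit(i);
		if (status)
			goto err;
	}
	return 0;
err:
	/* Undo only the units that were successfully mapped, newest first */
	for (i--; i >= 0; i--)
		unmap_unit(i);
	return status;
}

int main(void)
{
	return setup_units() ? 1 : 0;
}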
Example #2
0
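/*
 * kgsl_iommu_setup_defaultpagetable - Set up the initial default pagetable
 * for the iommu. This function is only called once during the first start;
 * successive starts do not call it.
 * @mmu - Pointer to mmu structure
 *
 * Create the initial default pagetable and set up the iommu mappings to it.
 * Return - 0 on success, else error code
 */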
static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
{
	int status = 0;
	int i = 0;
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_pagetable *pagetable = NULL;

	if (!cpu_is_msm8960()) {
		mmu->priv_bank_table =
			kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
		if (mmu->priv_bank_table == NULL) {
			status = -ENOMEM;
			goto err;
		}
		iommu_pt = mmu->priv_bank_table->priv;
	}
	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
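	/* Return error if the default pagetable doesn't exist */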
	if (mmu->defaultpagetable == NULL) {
		status = -ENOMEM;
		goto err;
	}
	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
				mmu->defaultpagetable;
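	/* Map the IOMMU registers only to the default pagetable */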
	for (i = 0; i < iommu->unit_count; i++) {
		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
		status = kgsl_mmu_map(pagetable,
			&(iommu->iommu_units[i].reg_map),
			GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
		if (status) {
			iommu->iommu_units[i].reg_map.priv &=
							~KGSL_MEMFLAGS_GLOBAL;
			goto err;
		}
	}
	return status;
err:
	for (i--; i >= 0; i--) {
		kgsl_mmu_unmap(pagetable,
				&(iommu->iommu_units[i].reg_map));
		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
	}
	if (mmu->priv_bank_table) {
		kgsl_mmu_putpagetable(mmu->priv_bank_table);
		mmu->priv_bank_table = NULL;
	}
	if (mmu->defaultpagetable) {
		kgsl_mmu_putpagetable(mmu->defaultpagetable);
		mmu->defaultpagetable = NULL;
	}
	return status;
}
Example #3
0
/*
 * kgsl_iommu_setup_defaultpagetable - Set up the initial default pagetable
 * for the iommu. This function is only called once during the first start;
 * successive starts do not call it.
 * @mmu - Pointer to mmu structure
 *
 * Create the initial default pagetable and set up the iommu mappings to it.
 * Return - 0 on success, else error code
 */
static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
{
	int status = 0;
	int i = 0;
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_pt *iommu_pt;

	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL) {
		status = -ENOMEM;
		goto err;
	}
	/* Map the IOMMU registers only to the default pagetable */
	for (i = 0; i < iommu->unit_count; i++) {
		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
		status = kgsl_mmu_map(mmu->defaultpagetable,
			&(iommu->iommu_units[i].reg_map),
			GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
		if (status) {
			iommu->iommu_units[i].reg_map.priv &=
							~KGSL_MEMFLAGS_GLOBAL;
			goto err;
		}
	}
	/*
	 * The default pagetable always has asid 0 assigned by the iommu driver
	 * and asid 1 is assigned to the private context.
	 */
	iommu_pt = mmu->defaultpagetable->priv;
	iommu_pt->asid = 0;
	set_bit(0, iommu->asids);
	set_bit(1, iommu->asids);
	return status;
err:
	for (i--; i >= 0; i--) {
		kgsl_mmu_unmap(mmu->defaultpagetable,
				&(iommu->iommu_units[i].reg_map));
		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
	}
	if (mmu->defaultpagetable) {
		kgsl_mmu_putpagetable(mmu->defaultpagetable);
		mmu->defaultpagetable = NULL;
	}
	return status;
}
Example #4
0
/*
 * kgsl_iommu_init_sync_lock - Initialize the sync lock between the GPU and CPU
 * @mmu - Pointer to mmu device
 *
 * Return - 0 on success, else error code
 */
static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	uint32_t lock_gpu_addr = 0;
	uint32_t lock_phy_addr = 0;
	uint32_t page_offset = 0;

	iommu->sync_lock_initialized = 0;

	if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) {
		KGSL_DRV_ERR(mmu->device,
		"The GPU microcode does not support IOMMUv1 sync opcodes\n");
		return -ENXIO;
	}

	/* Get the address of the lock variables; bail out if the kernel does
	 * not provide them */
	lock_phy_addr = (uint32_t)msm_iommu_lock_initialize();

	if (!lock_phy_addr) {
		KGSL_DRV_ERR(mmu->device,
				"GPU CPU sync lock is not supported by kernel\n");
		return -ENXIO;
	}

	/* The lock variables live in shared RAM; convert their virtual
	 * address to a physical address */
	lock_phy_addr = (lock_phy_addr - MSM_SHARED_RAM_BASE +
			msm_shared_ram_phys);

	/* Align the physical address to PAGE boundary and store the offset */
	page_offset = (lock_phy_addr & (PAGE_SIZE - 1));
	lock_phy_addr = (lock_phy_addr & ~(PAGE_SIZE - 1));
	iommu->sync_lock_desc.physaddr = (unsigned int)lock_phy_addr;

	iommu->sync_lock_desc.size =
				PAGE_ALIGN(sizeof(kgsl_iommu_sync_lock_vars));
	status = memdesc_sg_phys(&iommu->sync_lock_desc,
				 iommu->sync_lock_desc.physaddr,
				 iommu->sync_lock_desc.size);

	if (status)
		return status;

	/* Map Lock variables to GPU pagetable */
	iommu->sync_lock_desc.priv |= KGSL_MEMFLAGS_GLOBAL;

	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
				mmu->defaultpagetable;

	status = kgsl_mmu_map(pagetable, &iommu->sync_lock_desc,
				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (status) {
		kgsl_mmu_unmap(pagetable, &iommu->sync_lock_desc);
		iommu->sync_lock_desc.priv &= ~KGSL_MEMFLAGS_GLOBAL;
		return status;
	}

	/* Store the lock variables' GPU address */
	lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr + page_offset);

	kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr +
		(offsetof(struct remote_iommu_petersons_spinlock,
			flag[PROC_APPS])));
	kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr +
		(offsetof(struct remote_iommu_petersons_spinlock,
			flag[PROC_GPU])));
	kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr +
		(offsetof(struct remote_iommu_petersons_spinlock, turn)));

	iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars;

	/* Flag Sync Lock is Initialized  */
	iommu->sync_lock_initialized = 1;

	return status;
}
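
kgsl_iommu_init_sync_lock can only map whole pages, so it splits the lock's physical address into a page-aligned base (which is what actually gets mapped) and an offset within that page, then adds the offset back onto the mapped GPU address to find the lock itself. A small standalone sketch of that arithmetic, assuming 4 KiB pages; the addresses and names below are purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096u	/* stands in for PAGE_SIZE */

int main(void)
{
	uint32_t lock_phy_addr = 0x1f203a40;	/* hypothetical physical address of the lock */
	uint32_t mapped_gpu_base = 0xc0001000;	/* hypothetical GPU address of the mapped page */

	/* Remember where the lock sits inside its page, then align down */
	uint32_t page_offset = lock_phy_addr & (EXAMPLE_PAGE_SIZE - 1);
	uint32_t page_base = lock_phy_addr & ~(EXAMPLE_PAGE_SIZE - 1);

	/* The page-aligned base is what gets mapped; the lock itself lives at
	 * the same offset inside the GPU-visible page */
	uint32_t lock_gpu_addr = mapped_gpu_base + page_offset;

	printf("base 0x%08x offset 0x%03x -> gpu addr 0x%08x\n",
	       page_base, page_offset, lock_gpu_addr);
	return 0;
}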
Example #5
0
int
kgsl_sharedmem_map(gsl_deviceid_t device_id, gsl_flags_t flags, const gsl_scatterlist_t *scatterlist, gsl_memdesc_t *memdesc)
{
    int              status = GSL_FAILURE;
    gsl_sharedmem_t  *shmem = &gsl_driver.shmem;
    int              aperture_index;
    gsl_deviceid_t   tmp_id;

    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
                    "--> int kgsl_sharedmem_map(gsl_deviceid_t device_id=%D, gsl_flags_t flags=%x, gsl_scatterlist_t scatterlist=%S, gsl_memdesc_t *memdesc=%M)\n",
                    device_id, flags, scatterlist, memdesc );

    // execute pending device action
    tmp_id = (device_id != GSL_DEVICE_ANY) ? device_id : device_id+1;
    for ( ; tmp_id <= GSL_DEVICE_MAX; tmp_id++)
    {
        if (gsl_driver.device[tmp_id-1].flags & GSL_FLAGS_INITIALIZED)
        {
            kgsl_device_runpending(&gsl_driver.device[tmp_id-1]);

            if (tmp_id == device_id)
            {
                break;
            }
        }
    }

    // convert any device to an actual existing device
    if (device_id == GSL_DEVICE_ANY)
    {
        for ( ; ; )
        {
            device_id++;

            if (device_id <= GSL_DEVICE_MAX)
            {
                if (gsl_driver.device[device_id-1].flags & GSL_FLAGS_INITIALIZED)
                {
                    break;
                }
            }
            else
            {
                kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid device.\n" );
                kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_map. Return value %B\n", GSL_FAILURE );
                return (GSL_FAILURE);
            }
        }
    }

    DEBUG_ASSERT(device_id > GSL_DEVICE_ANY && device_id <= GSL_DEVICE_MAX);

    if (shmem->flags & GSL_FLAGS_INITIALIZED)
    {
        aperture_index = kgsl_sharedmem_getapertureindex(shmem, GSL_APERTURE_EMEM, GSL_CHANNEL_1);

        if (kgsl_memarena_isvirtualized(shmem->apertures[aperture_index].memarena))
        {
            DEBUG_ASSERT(scatterlist->num);
            DEBUG_ASSERT(scatterlist->pages);

            status = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, scatterlist->num * GSL_PAGESIZE, memdesc);
            if (status == GSL_SUCCESS)
            {
                GSL_MEMDESC_APERTURE_SET(memdesc, aperture_index);
                GSL_MEMDESC_DEVICE_SET(memdesc, device_id);

                // mark descriptor's memory as externally allocated -- i.e. outside GSL
                GSL_MEMDESC_EXTALLOC_SET(memdesc, 1);

                status = kgsl_mmu_map(&gsl_driver.device[device_id-1].mmu, memdesc->gpuaddr, scatterlist, flags, current->tgid);
                if (status != GSL_SUCCESS)
                {
                    kgsl_memarena_free(shmem->apertures[aperture_index].memarena, memdesc);
                }
            }
        }
    }

    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_map. Return value %B\n", status );

    return (status);
}
Example #6
0
int
kgsl_sharedmem_alloc0(gsl_deviceid_t device_id, gsl_flags_t flags, int sizebytes, gsl_memdesc_t *memdesc)
{
    gsl_apertureid_t  aperture_id;
    gsl_channelid_t   channel_id;
    gsl_deviceid_t    tmp_id;
    int               aperture_index, org_index;
    int               result  = GSL_FAILURE;
    gsl_mmu_t         *mmu    = NULL;
    gsl_sharedmem_t   *shmem  = &gsl_driver.shmem;

    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE,
                    "--> int kgsl_sharedmem_alloc(gsl_deviceid_t device_id=%D, gsl_flags_t flags=%x, int sizebytes=%d, gsl_memdesc_t *memdesc=%M)\n",
                    device_id, flags, sizebytes, memdesc );

    DEBUG_ASSERT(sizebytes);
    DEBUG_ASSERT(memdesc);

    GSL_MEMFLAGS_APERTURE_GET(flags, aperture_id);
    GSL_MEMFLAGS_CHANNEL_GET(flags, channel_id);

    memset(memdesc, 0, sizeof(gsl_memdesc_t));

    if (!(shmem->flags & GSL_FLAGS_INITIALIZED))
    {
        kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Shared memory not initialized.\n" );
        kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_alloc. Return value %B\n", GSL_FAILURE );
        return (GSL_FAILURE);
    }

    // execute pending device action
    tmp_id = (device_id != GSL_DEVICE_ANY) ? device_id : device_id+1;
    for ( ; tmp_id <= GSL_DEVICE_MAX; tmp_id++)
    {
        if (gsl_driver.device[tmp_id-1].flags & GSL_FLAGS_INITIALIZED)
        {
            kgsl_device_runpending(&gsl_driver.device[tmp_id-1]);

            if (tmp_id == device_id)
            {
                break;
            }
        }
    }

    // convert any device to an actual existing device
    if (device_id == GSL_DEVICE_ANY)
    {
        for ( ; ; )
        {
            device_id++;

            if (device_id <= GSL_DEVICE_MAX)
            {
                if (gsl_driver.device[device_id-1].flags & GSL_FLAGS_INITIALIZED)
                {
                    break;
                }
            }
            else
            {
                kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_ERROR, "ERROR: Invalid device.\n" );
                kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_alloc. Return value %B\n", GSL_FAILURE );
                return (GSL_FAILURE);
            }
        }
    }

    DEBUG_ASSERT(device_id > GSL_DEVICE_ANY && device_id <= GSL_DEVICE_MAX);

    // get mmu reference
    mmu = &gsl_driver.device[device_id-1].mmu;

    aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);

    // do not proceed if it is a strict request, the aperture requested is not present, and the MMU is enabled
    if (!((flags & GSL_MEMFLAGS_STRICTREQUEST) && aperture_id != shmem->apertures[aperture_index].id && kgsl_mmu_isenabled(mmu)))
    {
        // do allocation
        result = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, sizebytes, memdesc);

        // if allocation failed
        if (result != GSL_SUCCESS)
        {
            org_index = aperture_index;

            // then failover to other channels within the current aperture
            for (channel_id = GSL_CHANNEL_1; channel_id < GSL_CHANNEL_MAX; channel_id++)
            {
                aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);

                if (aperture_index != org_index)
                {
                    // do allocation
                    result = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, sizebytes, memdesc);

                    if (result == GSL_SUCCESS)
                    {
                        break;
                    }
                }
            }

            // if allocation still has not succeeded, then failover to EMEM/MMU aperture, but
            // not if it's a strict request and the MMU is enabled
            if (result != GSL_SUCCESS && aperture_id != GSL_APERTURE_EMEM
                && !((flags & GSL_MEMFLAGS_STRICTREQUEST) && kgsl_mmu_isenabled(mmu)))
            {
                aperture_id = GSL_APERTURE_EMEM;

                // try every channel
                for (channel_id = GSL_CHANNEL_1; channel_id < GSL_CHANNEL_MAX; channel_id++)
                {
                    aperture_index = kgsl_sharedmem_getapertureindex(shmem, aperture_id, channel_id);

                    if (aperture_index != org_index)
                    {
                        // do allocation
                        result = kgsl_memarena_alloc(shmem->apertures[aperture_index].memarena, flags, sizebytes, memdesc);

                        if (result == GSL_SUCCESS)
                        {
                            break;
                        }
                    }
                }
            }
        }
    }

    if (result == GSL_SUCCESS)
    {
        GSL_MEMDESC_APERTURE_SET(memdesc, aperture_index);
        GSL_MEMDESC_DEVICE_SET(memdesc, device_id);

        if (kgsl_memarena_isvirtualized(shmem->apertures[aperture_index].memarena))
        {
            gsl_scatterlist_t scatterlist;

            scatterlist.contiguous = 0;
            scatterlist.num        = memdesc->size / GSL_PAGESIZE;

            if (memdesc->size & (GSL_PAGESIZE-1))
            {
                scatterlist.num++;
            }

            scatterlist.pages = kmalloc(sizeof(unsigned int) * scatterlist.num, GFP_KERNEL);
            if (scatterlist.pages)
            {
                // allocate physical pages
                result = kgsl_hal_allocphysical(memdesc->gpuaddr, scatterlist.num, scatterlist.pages);
                if (result == GSL_SUCCESS)
                {
                    result = kgsl_mmu_map(mmu, memdesc->gpuaddr, &scatterlist, flags, current->tgid);
                    if (result != GSL_SUCCESS)
                    {
                        kgsl_hal_freephysical(memdesc->gpuaddr, scatterlist.num, scatterlist.pages);
                    }
                }

                kfree(scatterlist.pages);
            }
            else
            {
                result = GSL_FAILURE;
            }

            if (result != GSL_SUCCESS)
            {
                kgsl_memarena_free(shmem->apertures[aperture_index].memarena, memdesc);
            }
        }
    }

    KGSL_DEBUG_TBDUMP_SETMEM( memdesc->gpuaddr, 0, memdesc->size );

    kgsl_log_write( KGSL_LOG_GROUP_MEMORY | KGSL_LOG_LEVEL_TRACE, "<-- kgsl_sharedmem_alloc. Return value %B\n", result );

    return (result);
}
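
When the EMEM aperture is virtualized, kgsl_sharedmem_alloc0 sizes its scatter list by rounding the allocation up to whole pages: an integer division plus one extra page whenever the size has a remainder, which is equivalent to ceil(size / GSL_PAGESIZE) for a power-of-two page size. A tiny standalone sketch of that calculation, where EXAMPLE_PAGESIZE is only a stand-in for GSL_PAGESIZE:

#include <stdio.h>

#define EXAMPLE_PAGESIZE 4096u	/* stands in for GSL_PAGESIZE */

/* Round a byte count up to a whole number of pages, the way the allocator
 * sizes its scatter list */
static unsigned int pages_for(unsigned int sizebytes)
{
	unsigned int num = sizebytes / EXAMPLE_PAGESIZE;

	if (sizebytes & (EXAMPLE_PAGESIZE - 1))
		num++;	/* a partial trailing page still needs a slot */
	return num;
}

int main(void)
{
	printf("%u %u %u\n", pages_for(4096), pages_for(4097), pages_for(12288));
	/* prints: 1 2 3 */
	return 0;
}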
/*
 * kgsl_iommu_setup_defaultpagetable - Set up the initial default pagetable
 * for the iommu. This function is only called once during the first start;
 * successive starts do not call it.
 * @mmu - Pointer to mmu structure
 *
 * Create the initial default pagetable and set up the iommu mappings to it.
 * Return - 0 on success, else error code
 */
static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
{
	int status = 0;
	int i = 0;
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_pagetable *pagetable = NULL;

	/* If the chip is not an 8960, use the second context bank for
	 * pagetable switching on the 3D side; a separate pagetable is
	 * allocated for it */
	if (!cpu_is_msm8960()) {
		mmu->priv_bank_table =
			kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
		if (mmu->priv_bank_table == NULL) {
			status = -ENOMEM;
			goto err;
		}
		iommu_pt = mmu->priv_bank_table->priv;
		iommu_pt->asid = 1;
	}
	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL) {
		status = -ENOMEM;
		goto err;
	}
	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
				mmu->defaultpagetable;
	/* Map the IOMMU registers only to the default pagetable */
	for (i = 0; i < iommu->unit_count; i++) {
		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
		status = kgsl_mmu_map(pagetable,
			&(iommu->iommu_units[i].reg_map),
			GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
		if (status) {
			iommu->iommu_units[i].reg_map.priv &=
							~KGSL_MEMFLAGS_GLOBAL;
			goto err;
		}
	}
	/*
	 * The default pagetable always has asid 0 assigned by the iommu driver
	 * and asid 1 is assigned to the private context.
	 */
	iommu_pt = mmu->defaultpagetable->priv;
	iommu_pt->asid = 0;
	set_bit(0, iommu->asids);
	set_bit(1, iommu->asids);
	return status;
err:
	for (i--; i >= 0; i--) {
		kgsl_mmu_unmap(pagetable,
				&(iommu->iommu_units[i].reg_map));
		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
	}
	if (mmu->priv_bank_table) {
		kgsl_mmu_putpagetable(mmu->priv_bank_table);
		mmu->priv_bank_table = NULL;
	}
	if (mmu->defaultpagetable) {
		kgsl_mmu_putpagetable(mmu->defaultpagetable);
		mmu->defaultpagetable = NULL;
	}
	return status;
}
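
The tail of the function reserves ASID 0 for the default pagetable and ASID 1 for the private context bank by setting those bits in iommu->asids, so later pagetables are handed IDs from the remaining range. A user-space sketch of that bookkeeping, using a plain 64-bit word and hand-rolled helpers as stand-ins for the kernel's set_bit/clear_bit/find_first_zero_bit; the MAX_ASIDS limit is illustrative only:

#include <stdio.h>

#define MAX_ASIDS 64	/* illustrative; the real limit comes from the IOMMU */

static unsigned long long asid_map;	/* stand-in for iommu->asids */

static void reserve_asid(int asid)
{
	asid_map |= 1ULL << asid;
}

static void release_asid(int asid)
{
	asid_map &= ~(1ULL << asid);
}

/* Find and claim the lowest free ASID, roughly what find_first_zero_bit()
 * followed by set_bit() does */
static int alloc_asid(void)
{
	for (int asid = 0; asid < MAX_ASIDS; asid++) {
		if (!(asid_map & (1ULL << asid))) {
			reserve_asid(asid);
			return asid;
		}
	}
	return -1;	/* none free */
}

int main(void)
{
	/* asid 0 belongs to the default pagetable, asid 1 to the private bank */
	reserve_asid(0);
	reserve_asid(1);

	printf("next free asid: %d\n", alloc_asid());	/* prints 2 */
	release_asid(1);
	printf("next free asid: %d\n", alloc_asid());	/* prints 1 */
	return 0;
}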