Example #1
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
			       uint64_t address_max, uint64_t alignment,
			       uint32_t flags)
{

	uint64_t head_addr;
	uint64_t ent_addr;
	/* points to previous list entry, NULL if current entry is head of list */
	uint64_t prev_addr = 0;
	uint64_t new_ent_addr = 0;
	uint64_t desired_min_addr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
		     (unsigned long long)req_size,
		     (unsigned long long)address_min,
		     (unsigned long long)address_max,
		     (unsigned long long)alignment);
#endif

	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		goto error_out;
	}

	/*
	 * Do a variety of checks to validate the arguments.  The
	 * allocator code will later assume that these checks have
	 * been made.  We validate that the requested constraints are
	 * not self-contradictory before we look through the list of
	 * available memory.
	 */

	/* 0 is not a valid req_size for this allocator */
	if (!req_size)
		goto error_out;

	/* Round req_size up to a multiple of the minimum alignment bytes */
	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

	/*
	 * Convert !0 address_min and 0 address_max to special case of
	 * range that specifies an exact memory block to allocate.  Do
	 * this before other checks and adjustments so that this
	 * transformation will be validated.
	 */
	if (address_min && !address_max)
		address_max = address_min + req_size;
	else if (!address_min && !address_max)
		address_max = ~0ull;  /* If no limits given, use max limits */


	/*
	 * Enforce minimum alignment (this also keeps the minimum free block
	 * req_size the same as the alignment req_size).
	 */
	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;

	/*
	 * Adjust address minimum based on requested alignment (round
	 * up to meet alignment).  Do this here so we can reject
	 * impossible requests up front. (NOP for address_min == 0)
	 */
	if (alignment)
		address_min = ALIGN(address_min, alignment);

	/*
	 * Reject inconsistent args.  We have adjusted these, so this
	 * may fail due to our internal changes even if this check
	 * would pass for the values the user supplied.
	 */
	if (req_size > address_max - address_min)
		goto error_out;

	/* Walk through the list entries - first fit found is returned */

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	head_addr = cvmx_bootmem_desc->head_addr;
	ent_addr = head_addr;
	for (; ent_addr;
	     prev_addr = ent_addr,
	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
		uint64_t usable_base, usable_max;
		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);

		if (cvmx_bootmem_phy_get_next(ent_addr)
		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
				"0x%llx, next: 0x%llx\n",
				(unsigned long long)ent_addr,
				(unsigned long long)
				cvmx_bootmem_phy_get_next(ent_addr));
			goto error_out;
		}

		/*
		 * Determine if this is an entry that can satisfy the
		 * request.  Check to make sure the entry is large enough
		 * to satisfy the request.
		 */
		usable_base =
		    ALIGN(max(address_min, ent_addr), alignment);
		usable_max = min(address_max, ent_addr + ent_size);
		/*
		 * We should be able to allocate block at address
		 * usable_base.
		 */

		desired_min_addr = usable_base;
		/*
		 * Determine if request can be satisfied from the
		 * current entry.
		 */
		if (!((ent_addr + ent_size) > usable_base
				&& ent_addr < address_max
				&& req_size <= usable_max - usable_base))
			continue;
		/*
		 * We have found an entry that has room to satisfy the
		 * request, so allocate it from this entry.  If
		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
		 * the end of this block rather than the beginning.
		 */
		if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
			desired_min_addr = usable_max - req_size;
			/*
			 * Align desired address down to required
			 * alignment.
			 */
			desired_min_addr &= ~(alignment - 1);
		}

		/* Match at start of entry */
		if (desired_min_addr == ent_addr) {
			if (req_size < ent_size) {
				/*
				 * big enough to create a new block
				 * from top portion of block.
				 */
				new_ent_addr = ent_addr + req_size;
				cvmx_bootmem_phy_set_next(new_ent_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
				cvmx_bootmem_phy_set_size(new_ent_addr,
							ent_size -
							req_size);

				/*
				 * Adjust next pointer as following
				 * code uses this.
				 */
				cvmx_bootmem_phy_set_next(ent_addr,
							new_ent_addr);
			}

			/*
			 * adjust prev ptr or head to remove this
			 * entry from list.
			 */
			if (prev_addr)
				cvmx_bootmem_phy_set_next(prev_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
			else
				/*
				 * head of list being returned, so
				 * update head ptr.
				 */
				cvmx_bootmem_desc->head_addr =
					cvmx_bootmem_phy_get_next(ent_addr);

			if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
				cvmx_bootmem_unlock();
			return desired_min_addr;
		}
		/*
		 * The block being returned doesn't start at the beginning
		 * of the entry, so we know that we will be splitting a
		 * block off the front of this one: shrink the current
		 * entry to cover the low portion and create a new entry
		 * for the high portion, so that the new block starts at
		 * the desired address.
		 */
		new_ent_addr = desired_min_addr;
		cvmx_bootmem_phy_set_next(new_ent_addr,
					cvmx_bootmem_phy_get_next
					(ent_addr));
		cvmx_bootmem_phy_set_size(new_ent_addr,
					cvmx_bootmem_phy_get_size
					(ent_addr) -
					(desired_min_addr -
						ent_addr));
		cvmx_bootmem_phy_set_size(ent_addr,
					desired_min_addr - ent_addr);
		cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
		/* Loop again to handle actual alloc from new block */
	}
error_out:
	/* We didn't find anything, so return error */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();
	return -1;
}
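
The req_size round-up above is the standard power-of-two alignment idiom; ALIGN() used later in the function is conventionally defined the same way. A minimal sketch under that assumption (the macro definition and the sample values are illustrative, not taken from the SDK):

#include <stdint.h>
#include <stdio.h>

/* assumed definition: round x up to the next multiple of a (a must be a power of two) */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t req_size = 100;
	uint64_t alignment = 128;

	/* 100 rounded up to a 128-byte boundary -> 128 */
	printf("%llu\n", (unsigned long long)ALIGN(req_size, alignment));
	return 0;
}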
Example #2
unsigned int
intel_compute_size(struct intel_screen_private *intel,
                   int w, int h, int bpp, unsigned usage,
                   uint32_t *tiling, int *stride)
{
	int pitch, size;

	if (*tiling != I915_TILING_NONE) {
		/* First check whether tiling is necessary. */
		pitch = (w * bpp  + 7) / 8;
		pitch = ALIGN(pitch, 64);
		size = pitch * ALIGN (h, 2);
		if (INTEL_INFO(intel)->gen < 040) {
			/* Gen 2/3 has a maximum stride for tiling of
			 * 8192 bytes.
			 */
			if (pitch > KB(8))
				*tiling = I915_TILING_NONE;

			/* Narrower than half a tile? */
			if (pitch < 256)
				*tiling = I915_TILING_NONE;

			/* Older hardware requires fences to be power-of-two
			 * sized and aligned, with a minimum of 1 MiB, which
			 * causes massive overallocation for small textures.
			 */
			if (size < 1024*1024/2 && !intel->has_relaxed_fencing)
				*tiling = I915_TILING_NONE;
		} else if (!(usage & INTEL_CREATE_PIXMAP_DRI2) && size <= 4096) {
			/* Disable tiling beneath a page size, we will not see
			 * any benefit from reducing TLB misses and instead
			 * just incur extra cost when we require a fence.
			 */
			*tiling = I915_TILING_NONE;
		}
	}

	pitch = (w * bpp + 7) / 8;
	if (!(usage & INTEL_CREATE_PIXMAP_DRI2) && pitch <= 256)
		*tiling = I915_TILING_NONE;

	if (*tiling != I915_TILING_NONE) {
		int aligned_h, tile_height;

		if (IS_GEN2(intel))
			tile_height = 16;
		else if (*tiling == I915_TILING_X)
			tile_height = 8;
		else
			tile_height = 32;
		aligned_h = ALIGN(h, tile_height);

		*stride = intel_get_fence_pitch(intel,
						ALIGN(pitch, 512),
						*tiling);

		/* Round the object up to the size of the fence it will live in
		 * if necessary.  We could potentially make the kernel allocate
		 * a larger aperture space and just bind the subset of pages in,
		 * but this is easier and also keeps us out of trouble (as much)
		 * with drm_intel_bufmgr_check_aperture().
		 */
		size = intel_get_fence_size(intel, *stride * aligned_h);

		if (size > intel->max_tiling_size)
			*tiling = I915_TILING_NONE;
	}

	if (*tiling == I915_TILING_NONE) {
		/* We only require a 64 byte alignment for scanouts, but
		 * a 256 byte alignment for sharing with PRIME.
		 */
		*stride = ALIGN(pitch, 256);
		/* Round the height up so that the GPU's access to a 2x2 aligned
		 * subspan doesn't address an invalid page offset beyond the
		 * end of the GTT.
		 */
		size = *stride * ALIGN(h, 2);
	}

	return size;
}
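
For the untiled fallback path the stride/size math is simple; a hedged sketch of the same calculation with made-up inputs (the ALIGN definition is the usual power-of-two round-up, assumed here):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int w = 100, h = 33, bpp = 32;		/* illustrative pixmap parameters */
	int pitch = (w * bpp + 7) / 8;		/* 400 bytes per row */
	int stride = ALIGN(pitch, 256);		/* 512: 256-byte aligned for PRIME sharing */
	int size = stride * ALIGN(h, 2);	/* 512 * 34 = 17408 bytes */

	printf("stride=%d size=%d\n", stride, size);
	return 0;
}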
Example #3
/*
===================
R_AddSingleModel

May be run in parallel.

Here is where dynamic models actually get instantiated, and necessary
interaction surfaces get created. This is all done on a sort-by-model
basis to keep source data in cache (most likely L2) as any interactions
and shadows are generated, since dynamic models will typically be lit by
two or more lights.
===================
*/
void R_AddSingleModel( viewEntity_t* vEntity )
{
	// we will add all interaction surfs here, to be chained to the lights in later serial code
	vEntity->drawSurfs = NULL;
	vEntity->staticShadowVolumes = NULL;
	vEntity->dynamicShadowVolumes = NULL;
	
	// globals we really should pass in...
	const viewDef_t* viewDef = tr.viewDef;
	
	idRenderEntityLocal* entityDef = vEntity->entityDef;
	const renderEntity_t* renderEntity = &entityDef->parms;
	const idRenderWorldLocal* world = entityDef->world;
	
	if( viewDef->isXraySubview && entityDef->parms.xrayIndex == 1 )
	{
		return;
	}
	else if( !viewDef->isXraySubview && entityDef->parms.xrayIndex == 2 )
	{
		return;
	}
	
	SCOPED_PROFILE_EVENT( renderEntity->hModel == NULL ? "Unknown Model" : renderEntity->hModel->Name() );
	
	// calculate the znear for testing whether or not the view is inside a shadow projection
	const float znear = ( viewDef->renderView.cramZNear ) ? ( r_znear.GetFloat() * 0.25f ) : r_znear.GetFloat();
	
	// if the entity wasn't seen through a portal chain, it was added just for light shadows
	const bool modelIsVisible = !vEntity->scissorRect.IsEmpty();
	const bool addInteractions = modelIsVisible && ( !viewDef->isXraySubview || entityDef->parms.xrayIndex == 2 );
	const int entityIndex = entityDef->index;
	
	//---------------------------
	// Find which of the visible lights contact this entity
	//
	// If the entity doesn't accept light or cast shadows from any surface,
	// this can be skipped.
	//
	// OPTIMIZE: world areas can assume all referenced lights are used
	//---------------------------
	int	numContactedLights = 0;
	static const int MAX_CONTACTED_LIGHTS = 128;
	viewLight_t* contactedLights[MAX_CONTACTED_LIGHTS];
	idInteraction* staticInteractions[MAX_CONTACTED_LIGHTS];
	
	if( renderEntity->hModel == NULL ||
			renderEntity->hModel->ModelHasInteractingSurfaces() ||
			renderEntity->hModel->ModelHasShadowCastingSurfaces() )
	{
		SCOPED_PROFILE_EVENT( "Find lights" );
		for( viewLight_t* vLight = viewDef->viewLights; vLight != NULL; vLight = vLight->next )
		{
			if( vLight->scissorRect.IsEmpty() )
			{
				continue;
			}
			if( vLight->entityInteractionState != NULL )
			{
				// new code path, everything was done in AddLight
				if( vLight->entityInteractionState[entityIndex] == viewLight_t::INTERACTION_YES )
				{
					contactedLights[numContactedLights] = vLight;
					staticInteractions[numContactedLights] = world->interactionTable[vLight->lightDef->index * world->interactionTableWidth + entityIndex];
					if( ++numContactedLights == MAX_CONTACTED_LIGHTS )
					{
						break;
					}
				}
				continue;
			}
			
			const idRenderLightLocal* lightDef = vLight->lightDef;
			
			if( !lightDef->globalLightBounds.IntersectsBounds( entityDef->globalReferenceBounds ) )
			{
				continue;
			}
			
			if( R_CullModelBoundsToLight( lightDef, entityDef->localReferenceBounds, entityDef->modelRenderMatrix ) )
			{
				continue;
			}
			
			if( !modelIsVisible )
			{
				// some lights have their center of projection outside the world
				if( lightDef->areaNum != -1 )
				{
					// if no part of the model is in an area that is connected to
					// the light center (it is behind a solid, closed door), we can ignore it
					bool areasConnected = false;
					for( areaReference_t* ref = entityDef->entityRefs; ref != NULL; ref = ref->ownerNext )
					{
						if( world->AreasAreConnected( lightDef->areaNum, ref->area->areaNum, PS_BLOCK_VIEW ) )
						{
							areasConnected = true;
							break;
						}
					}
					if( areasConnected == false )
					{
						// can't possibly be seen or shadowed
						continue;
					}
				}
				
				// check more precisely for shadow visibility
				idBounds shadowBounds;
				R_ShadowBounds( entityDef->globalReferenceBounds, lightDef->globalLightBounds, lightDef->globalLightOrigin, shadowBounds );
				
				// this doesn't say that the shadow can't affect anything, only that it can't
				// affect anything in the view
				if( idRenderMatrix::CullBoundsToMVP( viewDef->worldSpace.mvp, shadowBounds ) )
				{
					continue;
				}
			}
			contactedLights[numContactedLights] = vLight;
			staticInteractions[numContactedLights] = world->interactionTable[vLight->lightDef->index * world->interactionTableWidth + entityIndex];
			if( ++numContactedLights == MAX_CONTACTED_LIGHTS )
			{
				break;
			}
		}
	}
	
	// if we aren't visible and none of the shadows stretch into the view,
	// we don't need to do anything else
	if( !modelIsVisible && numContactedLights == 0 )
	{
		return;
	}
	
	//---------------------------
	// create a dynamic model if the geometry isn't static
	//---------------------------
	idRenderModel* model = R_EntityDefDynamicModel( entityDef );
	if( model == NULL || model->NumSurfaces() <= 0 )
	{
		return;
	}
	
	// add the lightweight blood decal surfaces if the model is directly visible
	if( modelIsVisible )
	{
		assert( !vEntity->scissorRect.IsEmpty() );
		
		if( entityDef->decals != NULL && !r_skipDecals.GetBool() )
		{
			entityDef->decals->CreateDeferredDecals( model );
			
			unsigned int numDrawSurfs = entityDef->decals->GetNumDecalDrawSurfs();
			for( unsigned int i = 0; i < numDrawSurfs; i++ )
			{
				drawSurf_t* decalDrawSurf = entityDef->decals->CreateDecalDrawSurf( vEntity, i );
				if( decalDrawSurf != NULL )
				{
					decalDrawSurf->linkChain = NULL;
					decalDrawSurf->nextOnLight = vEntity->drawSurfs;
					vEntity->drawSurfs = decalDrawSurf;
				}
			}
		}
		
		if( entityDef->overlays != NULL && !r_skipOverlays.GetBool() )
		{
			entityDef->overlays->CreateDeferredOverlays( model );
			
			unsigned int numDrawSurfs = entityDef->overlays->GetNumOverlayDrawSurfs();
			for( unsigned int i = 0; i < numDrawSurfs; i++ )
			{
				drawSurf_t* overlayDrawSurf = entityDef->overlays->CreateOverlayDrawSurf( vEntity, model, i );
				if( overlayDrawSurf != NULL )
				{
					overlayDrawSurf->linkChain = NULL;
					overlayDrawSurf->nextOnLight = vEntity->drawSurfs;
					vEntity->drawSurfs = overlayDrawSurf;
				}
			}
		}
	}
	
	//---------------------------
	// copy matrix related stuff for back-end use
	// and setup a render matrix for faster culling
	//---------------------------
	vEntity->modelDepthHack = renderEntity->modelDepthHack;
	vEntity->weaponDepthHack = renderEntity->weaponDepthHack;
	vEntity->skipMotionBlur = renderEntity->skipMotionBlur;
	
	memcpy( vEntity->modelMatrix, entityDef->modelMatrix, sizeof( vEntity->modelMatrix ) );
	R_MatrixMultiply( entityDef->modelMatrix, viewDef->worldSpace.modelViewMatrix, vEntity->modelViewMatrix );
	
	idRenderMatrix viewMat;
	idRenderMatrix::Transpose( *( idRenderMatrix* )vEntity->modelViewMatrix, viewMat );
	idRenderMatrix::Multiply( viewDef->projectionRenderMatrix, viewMat, vEntity->mvp );
	if( renderEntity->weaponDepthHack )
	{
		idRenderMatrix::ApplyDepthHack( vEntity->mvp );
	}
	if( renderEntity->modelDepthHack != 0.0f )
	{
		idRenderMatrix::ApplyModelDepthHack( vEntity->mvp, renderEntity->modelDepthHack );
	}
	
	// local light and view origins are used to determine if the view is definitely outside
	// an extruded shadow volume, which means we can skip drawing the end caps
	idVec3 localViewOrigin;
	R_GlobalPointToLocal( vEntity->modelMatrix, viewDef->renderView.vieworg, localViewOrigin );
	
	//---------------------------
	// add all the model surfaces
	//---------------------------
	for( int surfaceNum = 0; surfaceNum < model->NumSurfaces(); surfaceNum++ )
	{
		const modelSurface_t* surf = model->Surface( surfaceNum );
		
		// for debugging, only show a single surface at a time
		if( r_singleSurface.GetInteger() >= 0 && surfaceNum != r_singleSurface.GetInteger() )
		{
			continue;
		}
		
		srfTriangles_t* tri = surf->geometry;
		if( tri == NULL )
		{
			continue;
		}
		if( tri->numIndexes == 0 )
		{
			continue;		// happens for particles
		}
		const idMaterial* shader = surf->shader;
		if( shader == NULL )
		{
			continue;
		}
		if( !shader->IsDrawn() )
		{
			continue;		// collision hulls, etc
		}
		
		// RemapShaderBySkin
		if( entityDef->parms.customShader != NULL )
		{
			// this is sort of a hack, but causes deformed surfaces to map to empty surfaces,
			// so the item highlight overlay doesn't highlight the autosprite surface
			if( shader->Deform() )
			{
				continue;
			}
			shader = entityDef->parms.customShader;
		}
		else if( entityDef->parms.customSkin )
		{
			shader = entityDef->parms.customSkin->RemapShaderBySkin( shader );
			if( shader == NULL )
			{
				continue;
			}
			if( !shader->IsDrawn() )
			{
				continue;
			}
		}
		
		// optionally override with the renderView->globalMaterial
		if( tr.primaryRenderView.globalMaterial != NULL )
		{
			shader = tr.primaryRenderView.globalMaterial;
		}
		
		SCOPED_PROFILE_EVENT( shader->GetName() );
		
		// debugging tool to make sure we have the correct pre-calculated bounds
		if( r_checkBounds.GetBool() )
		{
			for( int j = 0; j < tri->numVerts; j++ )
			{
				int k;
				for( k = 0; k < 3; k++ )
				{
					if( tri->verts[j].xyz[k] > tri->bounds[1][k] + CHECK_BOUNDS_EPSILON
							|| tri->verts[j].xyz[k] < tri->bounds[0][k] - CHECK_BOUNDS_EPSILON )
					{
						common->Printf( "bad tri->bounds on %s:%s\n", entityDef->parms.hModel->Name(), shader->GetName() );
						break;
					}
					if( tri->verts[j].xyz[k] > entityDef->localReferenceBounds[1][k] + CHECK_BOUNDS_EPSILON
							|| tri->verts[j].xyz[k] < entityDef->localReferenceBounds[0][k] - CHECK_BOUNDS_EPSILON )
					{
						common->Printf( "bad referenceBounds on %s:%s\n", entityDef->parms.hModel->Name(), shader->GetName() );
						break;
					}
				}
				if( k != 3 )
				{
					break;
				}
			}
		}
		
		// view frustum culling for the precise surface bounds, which is tighter
		// than the entire entity reference bounds
		// If the entire model wasn't visible, there is no need to check the
		// individual surfaces.
		const bool surfaceDirectlyVisible = modelIsVisible && !idRenderMatrix::CullBoundsToMVP( vEntity->mvp, tri->bounds );
		
		// RB: added check whether GPU skinning is available at all
		const bool gpuSkinned = ( tri->staticModelWithJoints != NULL && r_useGPUSkinning.GetBool() && glConfig.gpuSkinningAvailable );
		// RB end
		
		//--------------------------
		// base drawing surface
		//--------------------------
		drawSurf_t* baseDrawSurf = NULL;
		if( surfaceDirectlyVisible )
		{
			// make sure we have an ambient cache and all necessary normals / tangents
			if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
			{
				tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
			}
			
			if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )
			{
				// we are going to use it for drawing, so make sure we have the tangents and normals
				if( shader->ReceivesLighting() && !tri->tangentsCalculated )
				{
					assert( tri->staticModelWithJoints == NULL );
					R_DeriveTangents( tri );
					
					// RB: this was hit by parametric particle models ..
					//assert( false );	// this should no longer be hit
					// RB end
				}
				tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
			}
			
			// add the surface for drawing
			// we can re-use some of the values for light interaction surfaces
			baseDrawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *baseDrawSurf ), FRAME_ALLOC_DRAW_SURFACE );
			baseDrawSurf->frontEndGeo = tri;
			baseDrawSurf->space = vEntity;
			baseDrawSurf->scissorRect = vEntity->scissorRect;
			baseDrawSurf->extraGLState = 0;
			baseDrawSurf->renderZFail = 0;
			
			R_SetupDrawSurfShader( baseDrawSurf, shader, renderEntity );
			
			// Check for deformations (eyeballs, flares, etc)
			const deform_t shaderDeform = shader->Deform();
			if( shaderDeform != DFRM_NONE )
			{
				drawSurf_t* deformDrawSurf = R_DeformDrawSurf( baseDrawSurf );
				if( deformDrawSurf != NULL )
				{
					// any deforms may have created multiple draw surfaces
					for( drawSurf_t* surf = deformDrawSurf, * next = NULL; surf != NULL; surf = next )
					{
						next = surf->nextOnLight;
						
						surf->linkChain = NULL;
						surf->nextOnLight = vEntity->drawSurfs;
						vEntity->drawSurfs = surf;
					}
				}
			}
			
			// Most deform source surfaces do not need to be rendered.
			// However, particles are rendered in conjunction with the source surface.
			if( shaderDeform == DFRM_NONE || shaderDeform == DFRM_PARTICLE || shaderDeform == DFRM_PARTICLE2 )
			{
				// copy verts and indexes to this frame's hardware memory if they aren't already there
				if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )
				{
					tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( tri->verts[0] ), VERTEX_CACHE_ALIGN ) );
				}
				if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
				{
					tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( tri->indexes[0] ), INDEX_CACHE_ALIGN ) );
				}
				
				R_SetupDrawSurfJoints( baseDrawSurf, tri, shader );
				
				baseDrawSurf->numIndexes = tri->numIndexes;
				baseDrawSurf->ambientCache = tri->ambientCache;
				baseDrawSurf->indexCache = tri->indexCache;
				baseDrawSurf->shadowCache = 0;
				
				baseDrawSurf->linkChain = NULL;		// link to the view
				baseDrawSurf->nextOnLight = vEntity->drawSurfs;
				vEntity->drawSurfs = baseDrawSurf;
			}
		}
		
		//----------------------------------------
		// add all light interactions
		//----------------------------------------
		for( int contactedLight = 0; contactedLight < numContactedLights; contactedLight++ )
		{
			viewLight_t* vLight = contactedLights[contactedLight];
			const idRenderLightLocal* lightDef = vLight->lightDef;
			const idInteraction* interaction = staticInteractions[contactedLight];
			
			// check for a static interaction
			surfaceInteraction_t* surfInter = NULL;
			if( interaction > INTERACTION_EMPTY && interaction->staticInteraction )
			{
				// we have a static interaction that was calculated accurately
				assert( model->NumSurfaces() == interaction->numSurfaces );
				surfInter = &interaction->surfaces[surfaceNum];
			}
			else
			{
				// try to do a more precise cull of this model surface to the light
				if( R_CullModelBoundsToLight( lightDef, tri->bounds, entityDef->modelRenderMatrix ) )
				{
					continue;
				}
			}
			
			// "invisible ink" lights and shaders (imp spawn drawing on walls, etc)
			if( shader->Spectrum() != lightDef->lightShader->Spectrum() )
			{
				continue;
			}
			
			// Calculate the local light origin to determine if the view is inside the shadow
			// projection and to calculate the triangle facing for dynamic shadow volumes.
			idVec3 localLightOrigin;
			R_GlobalPointToLocal( vEntity->modelMatrix, lightDef->globalLightOrigin, localLightOrigin );
			
			//--------------------------
			// surface light interactions
			//--------------------------
			
			dynamicShadowVolumeParms_t* dynamicShadowParms = NULL;
			
			if( addInteractions && surfaceDirectlyVisible && shader->ReceivesLighting() )
			{
				// static interactions can commonly find that no triangles from a surface
				// contact the light, even when the total model does
				if( surfInter == NULL || surfInter->lightTrisIndexCache > 0 )
				{
					// create a drawSurf for this interaction
					drawSurf_t* lightDrawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *lightDrawSurf ), FRAME_ALLOC_DRAW_SURFACE );
					
					if( surfInter != NULL )
					{
						// optimized static interaction
						lightDrawSurf->numIndexes = surfInter->numLightTrisIndexes;
						lightDrawSurf->indexCache = surfInter->lightTrisIndexCache;
					}
					else
					{
						// throw the entire source surface at it without any per-triangle culling
						lightDrawSurf->numIndexes = tri->numIndexes;
						lightDrawSurf->indexCache = tri->indexCache;
						
						// optionally cull the triangles to the light volume
						if( r_cullDynamicLightTriangles.GetBool() )
						{
						
							vertCacheHandle_t lightIndexCache = vertexCache.AllocIndex( NULL, ALIGN( lightDrawSurf->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
							if( vertexCache.CacheIsCurrent( lightIndexCache ) )
							{
								lightDrawSurf->indexCache = lightIndexCache;
								
								dynamicShadowParms = ( dynamicShadowVolumeParms_t* )R_FrameAlloc( sizeof( dynamicShadowParms[0] ), FRAME_ALLOC_SHADOW_VOLUME_PARMS );
								
								dynamicShadowParms->verts = tri->verts;
								dynamicShadowParms->numVerts = tri->numVerts;
								dynamicShadowParms->indexes = tri->indexes;
								dynamicShadowParms->numIndexes = tri->numIndexes;
								dynamicShadowParms->silEdges = tri->silEdges;
								dynamicShadowParms->numSilEdges = tri->numSilEdges;
								dynamicShadowParms->joints = gpuSkinned ? tri->staticModelWithJoints->jointsInverted : NULL;
								dynamicShadowParms->numJoints = gpuSkinned ? tri->staticModelWithJoints->numInvertedJoints : 0;
								dynamicShadowParms->triangleBounds = tri->bounds;
								dynamicShadowParms->triangleMVP = vEntity->mvp;
								dynamicShadowParms->localLightOrigin = localLightOrigin;
								dynamicShadowParms->localViewOrigin = localViewOrigin;
								idRenderMatrix::Multiply( vLight->lightDef->baseLightProject, entityDef->modelRenderMatrix, dynamicShadowParms->localLightProject );
								dynamicShadowParms->zNear = znear;
								dynamicShadowParms->lightZMin = vLight->scissorRect.zmin;
								dynamicShadowParms->lightZMax = vLight->scissorRect.zmax;
								dynamicShadowParms->cullShadowTrianglesToLight = false;
								dynamicShadowParms->forceShadowCaps = false;
								dynamicShadowParms->useShadowPreciseInsideTest = false;
								dynamicShadowParms->useShadowDepthBounds = false;
								dynamicShadowParms->tempFacing = NULL;
								dynamicShadowParms->tempCulled = NULL;
								dynamicShadowParms->tempVerts = NULL;
								dynamicShadowParms->indexBuffer = NULL;
								dynamicShadowParms->shadowIndices = NULL;
								dynamicShadowParms->maxShadowIndices = 0;
								dynamicShadowParms->numShadowIndices = NULL;
								dynamicShadowParms->lightIndices = ( triIndex_t* )vertexCache.MappedIndexBuffer( lightIndexCache );
								dynamicShadowParms->maxLightIndices = lightDrawSurf->numIndexes;
								dynamicShadowParms->numLightIndices = &lightDrawSurf->numIndexes;
								dynamicShadowParms->renderZFail = NULL;
								dynamicShadowParms->shadowZMin = NULL;
								dynamicShadowParms->shadowZMax = NULL;
								dynamicShadowParms->shadowVolumeState = & lightDrawSurf->shadowVolumeState;
								
								lightDrawSurf->shadowVolumeState = SHADOWVOLUME_UNFINISHED;
								
								dynamicShadowParms->next = vEntity->dynamicShadowVolumes;
								vEntity->dynamicShadowVolumes = dynamicShadowParms;
							}
						}
					}
					lightDrawSurf->ambientCache = tri->ambientCache;
					lightDrawSurf->shadowCache = 0;
					lightDrawSurf->frontEndGeo = tri;
					lightDrawSurf->space = vEntity;
					lightDrawSurf->material = shader;
					lightDrawSurf->extraGLState = 0;
					lightDrawSurf->scissorRect = vLight->scissorRect; // interactionScissor;
					lightDrawSurf->sort = 0.0f;
					lightDrawSurf->renderZFail = 0;
					lightDrawSurf->shaderRegisters = baseDrawSurf->shaderRegisters;
					
					R_SetupDrawSurfJoints( lightDrawSurf, tri, shader );
					
					// Determine which linked list to add the light surface to.
					// There will only be localSurfaces if the light casts shadows and
					// there are surfaces with NOSELFSHADOW.
					if( shader->Coverage() == MC_TRANSLUCENT )
					{
						lightDrawSurf->linkChain = &vLight->translucentInteractions;
					}
					else if( !lightDef->parms.noShadows && shader->TestMaterialFlag( MF_NOSELFSHADOW ) )
					{
						lightDrawSurf->linkChain = &vLight->localInteractions;
					}
					else
					{
						lightDrawSurf->linkChain = &vLight->globalInteractions;
					}
					lightDrawSurf->nextOnLight = vEntity->drawSurfs;
					vEntity->drawSurfs = lightDrawSurf;
				}
			}
			
			//--------------------------
			// surface shadows
			//--------------------------
			
			if( !shader->SurfaceCastsShadow() )
			{
				continue;
			}
			if( !lightDef->LightCastsShadows() )
			{
				continue;
			}
			if( tri->silEdges == NULL )
			{
				continue;		// can happen for beam models (shouldn't use a shadow casting material, though...)
			}
			
			// if the static shadow does not have any shadows
			if( surfInter != NULL && surfInter->numShadowIndexes == 0 && !r_useShadowMapping.GetBool() )
			{
				continue;
			}
			
			// some entities, like view weapons, don't cast any shadows
			if( entityDef->parms.noShadow )
			{
				continue;
			}
			
			// No shadow if it's suppressed for this light.
			if( entityDef->parms.suppressShadowInLightID && entityDef->parms.suppressShadowInLightID == lightDef->parms.lightId )
			{
				continue;
			}
			
			// RB begin
			if( r_useShadowMapping.GetBool() )
			{
				//if( addInteractions && surfaceDirectlyVisible && shader->ReceivesLighting() )
				{
					// static interactions can commonly find that no triangles from a surface
					// contact the light, even when the total model does
					if( surfInter == NULL || surfInter->lightTrisIndexCache > 0 )
					{
						// create a drawSurf for this interaction
						drawSurf_t* shadowDrawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *shadowDrawSurf ), FRAME_ALLOC_DRAW_SURFACE );
						
						if( surfInter != NULL )
						{
							// optimized static interaction
							shadowDrawSurf->numIndexes = surfInter->numLightTrisIndexes;
							shadowDrawSurf->indexCache = surfInter->lightTrisIndexCache;
						}
						else
						{
							// make sure we have an ambient cache and all necessary normals / tangents
							if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
							{
								tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
							}
							
							// throw the entire source surface at it without any per-triangle culling
							shadowDrawSurf->numIndexes = tri->numIndexes;
							shadowDrawSurf->indexCache = tri->indexCache;
						}
						
						if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )
						{
							// we are going to use it for drawing, so make sure we have the tangents and normals
							if( shader->ReceivesLighting() && !tri->tangentsCalculated )
							{
								assert( tri->staticModelWithJoints == NULL );
								R_DeriveTangents( tri );
								
								// RB: this was hit by parametric particle models ..
								//assert( false );	// this should no longer be hit
								// RB end
							}
							tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
						}
						
						shadowDrawSurf->ambientCache = tri->ambientCache;
						shadowDrawSurf->shadowCache = 0;
						shadowDrawSurf->frontEndGeo = tri;
						shadowDrawSurf->space = vEntity;
						shadowDrawSurf->material = shader;
						shadowDrawSurf->extraGLState = 0;
						shadowDrawSurf->scissorRect = vLight->scissorRect; // interactionScissor;
						shadowDrawSurf->sort = 0.0f;
						shadowDrawSurf->renderZFail = 0;
						//shadowDrawSurf->shaderRegisters = baseDrawSurf->shaderRegisters;
						
						R_SetupDrawSurfJoints( shadowDrawSurf, tri, shader );
						
						// determine which linked list to add the shadow surface to
						
						//shadowDrawSurf->linkChain = shader->TestMaterialFlag( MF_NOSELFSHADOW ) ? &vLight->localShadows : &vLight->globalShadows;
						
						shadowDrawSurf->linkChain = &vLight->globalShadows;
						shadowDrawSurf->nextOnLight = vEntity->drawSurfs;
						
						vEntity->drawSurfs = shadowDrawSurf;
						
					}
				}
				
				
				continue;
			}
			// RB end
			
			if( lightDef->parms.prelightModel && lightDef->lightHasMoved == false &&
					entityDef->parms.hModel->IsStaticWorldModel() && !r_skipPrelightShadows.GetBool() )
			{
			// static light / world model shadow interactions
				// are always captured in the prelight shadow volume
				continue;
			}
			
			// If the shadow is drawn (or translucent), but the model isn't, we must include the shadow caps
			// because we may be able to see into the shadow volume even though the view is outside it.
			// This happens for the player world weapon and possibly some animations in multiplayer.
			const bool forceShadowCaps = !addInteractions || r_forceShadowCaps.GetBool();
			
			drawSurf_t* shadowDrawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *shadowDrawSurf ), FRAME_ALLOC_DRAW_SURFACE );
			
			if( surfInter != NULL )
			{
				shadowDrawSurf->numIndexes = 0;
				shadowDrawSurf->indexCache = surfInter->shadowIndexCache;
				shadowDrawSurf->shadowCache = tri->shadowCache;
				shadowDrawSurf->scissorRect = vLight->scissorRect;		// default to the light scissor and light depth bounds
				shadowDrawSurf->shadowVolumeState = SHADOWVOLUME_DONE;	// assume the shadow volume is done in case r_skipStaticShadows is set
				
				if( !r_skipStaticShadows.GetBool() )
				{
					staticShadowVolumeParms_t* staticShadowParms = ( staticShadowVolumeParms_t* )R_FrameAlloc( sizeof( staticShadowParms[0] ), FRAME_ALLOC_SHADOW_VOLUME_PARMS );
					
					staticShadowParms->verts = tri->staticShadowVertexes;
					staticShadowParms->numVerts = tri->numVerts * 2;
					staticShadowParms->indexes = surfInter->shadowIndexes;
					staticShadowParms->numIndexes = surfInter->numShadowIndexes;
					staticShadowParms->numShadowIndicesWithCaps = surfInter->numShadowIndexes;
					staticShadowParms->numShadowIndicesNoCaps = surfInter->numShadowIndexesNoCaps;
					staticShadowParms->triangleBounds = tri->bounds;
					staticShadowParms->triangleMVP = vEntity->mvp;
					staticShadowParms->localLightOrigin = localLightOrigin;
					staticShadowParms->localViewOrigin = localViewOrigin;
					staticShadowParms->zNear = znear;
					staticShadowParms->lightZMin = vLight->scissorRect.zmin;
					staticShadowParms->lightZMax = vLight->scissorRect.zmax;
					staticShadowParms->forceShadowCaps = forceShadowCaps;
					staticShadowParms->useShadowPreciseInsideTest = r_useShadowPreciseInsideTest.GetBool();
					staticShadowParms->useShadowDepthBounds = r_useShadowDepthBounds.GetBool();
					staticShadowParms->numShadowIndices = & shadowDrawSurf->numIndexes;
					staticShadowParms->renderZFail = & shadowDrawSurf->renderZFail;
					staticShadowParms->shadowZMin = & shadowDrawSurf->scissorRect.zmin;
					staticShadowParms->shadowZMax = & shadowDrawSurf->scissorRect.zmax;
					staticShadowParms->shadowVolumeState = & shadowDrawSurf->shadowVolumeState;
					
					shadowDrawSurf->shadowVolumeState = SHADOWVOLUME_UNFINISHED;
					
					staticShadowParms->next = vEntity->staticShadowVolumes;
					vEntity->staticShadowVolumes = staticShadowParms;
				}
				
			}
			else
			{
				// When CPU skinning, the dynamic shadow verts of a dynamic model may not have been copied to buffer memory yet.
				if( !vertexCache.CacheIsCurrent( tri->shadowCache ) )
				{
					assert( !gpuSkinned );	// the shadow cache should be static when using GPU skinning
					// Extracts just the xyz values from a set of full size drawverts, and
					// duplicates them with w set to 0 and 1 for the vertex program to project.
					// This is constant for any number of lights, the vertex program takes care
					// of projecting the verts to infinity for a particular light.
					tri->shadowCache = vertexCache.AllocVertex( NULL, ALIGN( tri->numVerts * 2 * sizeof( idShadowVert ), VERTEX_CACHE_ALIGN ) );
					idShadowVert* shadowVerts = ( idShadowVert* )vertexCache.MappedVertexBuffer( tri->shadowCache );
					idShadowVert::CreateShadowCache( shadowVerts, tri->verts, tri->numVerts );
				}
				
				const int maxShadowVolumeIndexes = tri->numSilEdges * 6 + tri->numIndexes * 2;
				
				shadowDrawSurf->numIndexes = 0;
				shadowDrawSurf->indexCache = vertexCache.AllocIndex( NULL, ALIGN( maxShadowVolumeIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
				shadowDrawSurf->shadowCache = tri->shadowCache;
				shadowDrawSurf->scissorRect = vLight->scissorRect;		// default to the light scissor and light depth bounds
				shadowDrawSurf->shadowVolumeState = SHADOWVOLUME_DONE;	// assume the shadow volume is done in case the index cache allocation failed
				
				// if the index cache was successfully allocated then setup the parms to create a shadow volume in parallel
				if( vertexCache.CacheIsCurrent( shadowDrawSurf->indexCache ) && !r_skipDynamicShadows.GetBool() )
				{
				
					// if the parms were not already allocated for culling interaction triangles to the light frustum
					if( dynamicShadowParms == NULL )
					{
						dynamicShadowParms = ( dynamicShadowVolumeParms_t* )R_FrameAlloc( sizeof( dynamicShadowParms[0] ), FRAME_ALLOC_SHADOW_VOLUME_PARMS );
					}
					else
					{
						// the shadow volume will be rendered first, so by the time the interaction surface is drawn the triangles will have been culled
						*dynamicShadowParms->shadowVolumeState = SHADOWVOLUME_DONE;
					}
					
					dynamicShadowParms->verts = tri->verts;
					dynamicShadowParms->numVerts = tri->numVerts;
					dynamicShadowParms->indexes = tri->indexes;
					dynamicShadowParms->numIndexes = tri->numIndexes;
					dynamicShadowParms->silEdges = tri->silEdges;
					dynamicShadowParms->numSilEdges = tri->numSilEdges;
					dynamicShadowParms->joints = gpuSkinned ? tri->staticModelWithJoints->jointsInverted : NULL;
					dynamicShadowParms->numJoints = gpuSkinned ? tri->staticModelWithJoints->numInvertedJoints : 0;
					dynamicShadowParms->triangleBounds = tri->bounds;
					dynamicShadowParms->triangleMVP = vEntity->mvp;
					dynamicShadowParms->localLightOrigin = localLightOrigin;
					dynamicShadowParms->localViewOrigin = localViewOrigin;
					idRenderMatrix::Multiply( vLight->lightDef->baseLightProject, entityDef->modelRenderMatrix, dynamicShadowParms->localLightProject );
					dynamicShadowParms->zNear = znear;
					dynamicShadowParms->lightZMin = vLight->scissorRect.zmin;
					dynamicShadowParms->lightZMax = vLight->scissorRect.zmax;
					dynamicShadowParms->cullShadowTrianglesToLight = r_cullDynamicShadowTriangles.GetBool();
					dynamicShadowParms->forceShadowCaps = forceShadowCaps;
					dynamicShadowParms->useShadowPreciseInsideTest = r_useShadowPreciseInsideTest.GetBool();
					dynamicShadowParms->useShadowDepthBounds = r_useShadowDepthBounds.GetBool();
					dynamicShadowParms->tempFacing = NULL;
					dynamicShadowParms->tempCulled = NULL;
					dynamicShadowParms->tempVerts = NULL;
					dynamicShadowParms->indexBuffer = NULL;
					dynamicShadowParms->shadowIndices = ( triIndex_t* )vertexCache.MappedIndexBuffer( shadowDrawSurf->indexCache );
					dynamicShadowParms->maxShadowIndices = maxShadowVolumeIndexes;
					dynamicShadowParms->numShadowIndices = & shadowDrawSurf->numIndexes;
					// dynamicShadowParms->lightIndices may have already been set for the interaction surface
					// dynamicShadowParms->maxLightIndices may have already been set for the interaction surface
					// dynamicShadowParms->numLightIndices may have already been set for the interaction surface
					dynamicShadowParms->renderZFail = & shadowDrawSurf->renderZFail;
					dynamicShadowParms->shadowZMin = & shadowDrawSurf->scissorRect.zmin;
					dynamicShadowParms->shadowZMax = & shadowDrawSurf->scissorRect.zmax;
					dynamicShadowParms->shadowVolumeState = & shadowDrawSurf->shadowVolumeState;
					
					shadowDrawSurf->shadowVolumeState = SHADOWVOLUME_UNFINISHED;
					
					// if the parms were not already linked for culling interaction triangles to the light frustum
					if( dynamicShadowParms->lightIndices == NULL )
					{
						dynamicShadowParms->next = vEntity->dynamicShadowVolumes;
						vEntity->dynamicShadowVolumes = dynamicShadowParms;
					}
					
					tr.pc.c_createShadowVolumes++;
				}
			}
			
			assert( vertexCache.CacheIsCurrent( shadowDrawSurf->shadowCache ) );
			assert( vertexCache.CacheIsCurrent( shadowDrawSurf->indexCache ) );
			
			shadowDrawSurf->ambientCache = 0;
			shadowDrawSurf->frontEndGeo = NULL;
			shadowDrawSurf->space = vEntity;
			shadowDrawSurf->material = NULL;
			shadowDrawSurf->extraGLState = 0;
			shadowDrawSurf->sort = 0.0f;
			shadowDrawSurf->shaderRegisters = NULL;
			
			R_SetupDrawSurfJoints( shadowDrawSurf, tri, NULL );
			
			// determine which linked list to add the shadow surface to
			shadowDrawSurf->linkChain = shader->TestMaterialFlag( MF_NOSELFSHADOW ) ? &vLight->localShadows : &vLight->globalShadows;
			shadowDrawSurf->nextOnLight = vEntity->drawSurfs;
			vEntity->drawSurfs = shadowDrawSurf;
		}
	}
}
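
The vertex and index cache allocations above always round the requested byte count up to VERTEX_CACHE_ALIGN / INDEX_CACHE_ALIGN. A small standalone sketch of that size computation (the alignment constant and the vertex size are assumptions for illustration, not the engine's actual values):

#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define VERTEX_CACHE_ALIGN	32	/* assumed */
#define DRAWVERT_SIZE		20	/* hypothetical stand-in for sizeof( idDrawVert ) */

int main(void)
{
	size_t numVerts = 123;
	size_t bytes = ALIGN(numVerts * DRAWVERT_SIZE, VERTEX_CACHE_ALIGN);

	printf("alloc %zu bytes\n", bytes);	/* 2460 rounded up to 2464 */
	return 0;
}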
Example #4
File: zynqpl.c Project: Bing0/u-boot
int zynq_load(Xilinx_desc *desc, const void *buf, size_t bsize)
{
	unsigned long ts; /* Timestamp */
	u32 partialbit = 0;
	u32 i, control, isr_status, status, swap, diff;
	u32 *buf_start;

	/* Detect if we are going to work with a partial or full bitstream */
	if (bsize != desc->size) {
		printf("%s: Working with partial bitstream\n", __func__);
		partialbit = 1;
	}

	buf_start = check_data((u8 *)buf, bsize, &swap);
	if (!buf_start)
		return FPGA_FAIL;

	/* Check if data is offset from the start */
	diff = (u32)buf_start - (u32)buf;
	if (diff) {
		printf("%s: Bitstream is not validated yet (diff %x)\n",
		       __func__, diff);
		return FPGA_FAIL;
	}

	if ((u32)buf < SZ_1M) {
		printf("%s: Bitstream has to be placed up to 1MB (%x)\n",
		       __func__, (u32)buf);
		return FPGA_FAIL;
	}

	if ((u32)buf != ALIGN((u32)buf, ARCH_DMA_MINALIGN)) {
		u32 *new_buf = (u32 *)ALIGN((u32)buf, ARCH_DMA_MINALIGN);

		printf("%s: Align buffer at %x to %x(swap %d)\n", __func__,
		       (u32)buf_start, (u32)new_buf, swap);

		for (i = 0; i < (bsize/4); i++)
			new_buf[i] = load_word(&buf_start[i], swap);

		swap = SWAP_DONE;
		buf = new_buf;
	} else if (swap != SWAP_DONE) {
		/* For bitstreams which are already aligned */
		u32 *new_buf = (u32 *)buf;

		printf("%s: Bitstream is not swapped(%d) - swap it\n", __func__,
		       swap);

		for (i = 0; i < (bsize/4); i++)
			new_buf[i] = load_word(&buf_start[i], swap);

		swap = SWAP_DONE;
	}

	/* Clear loopback bit */
	clrbits_le32(&devcfg_base->mctrl, DEVCFG_MCTRL_PCAP_LPBK);

	if (!partialbit) {
		zynq_slcr_devcfg_disable();

		/* Setting PCFG_PROG_B signal to high */
		control = readl(&devcfg_base->ctrl);
		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);
		/* Setting PCFG_PROG_B signal to low */
		writel(control & ~DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);

		/* Polling the PCAP_INIT status for Reset */
		ts = get_timer(0);
		while (readl(&devcfg_base->status) & DEVCFG_STATUS_PCFG_INIT) {
			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
				printf("%s: Timeout wait for INIT to clear\n",
				       __func__);
				return FPGA_FAIL;
			}
		}

		/* Setting PCFG_PROG_B signal to high */
		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);

		/* Polling the PCAP_INIT status for Set */
		ts = get_timer(0);
		while (!(readl(&devcfg_base->status) &
			DEVCFG_STATUS_PCFG_INIT)) {
			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
				printf("%s: Timeout wait for INIT to set\n",
				       __func__);
				return FPGA_FAIL;
			}
		}
	}

	isr_status = readl(&devcfg_base->int_sts);

	/* Clear it all, so if Boot ROM comes back, it can proceed */
	writel(0xFFFFFFFF, &devcfg_base->int_sts);

	if (isr_status & DEVCFG_ISR_FATAL_ERROR_MASK) {
		debug("%s: Fatal errors in PCAP 0x%X\n", __func__, isr_status);

		/* If RX FIFO overflow, need to flush RX FIFO first */
		if (isr_status & DEVCFG_ISR_RX_FIFO_OV) {
			writel(DEVCFG_MCTRL_RFIFO_FLUSH, &devcfg_base->mctrl);
			writel(0xFFFFFFFF, &devcfg_base->int_sts);
		}
		return FPGA_FAIL;
	}

	status = readl(&devcfg_base->status);

	debug("%s: Status = 0x%08X\n", __func__, status);

	if (status & DEVCFG_STATUS_DMA_CMD_Q_F) {
		debug("%s: Error: device busy\n", __func__);
		return FPGA_FAIL;
	}

	debug("%s: Device ready\n", __func__);

	if (!(status & DEVCFG_STATUS_DMA_CMD_Q_E)) {
		if (!(readl(&devcfg_base->int_sts) & DEVCFG_ISR_DMA_DONE)) {
			/* Error state, transfer cannot occur */
			debug("%s: ISR indicates error\n", __func__);
			return FPGA_FAIL;
		} else {
			/* Clear out the status */
			writel(DEVCFG_ISR_DMA_DONE, &devcfg_base->int_sts);
		}
	}

	if (status & DEVCFG_STATUS_DMA_DONE_CNT_MASK) {
		/* Clear the count of completed DMA transfers */
		writel(DEVCFG_STATUS_DMA_DONE_CNT_MASK, &devcfg_base->status);
	}

	debug("%s: Source = 0x%08X\n", __func__, (u32)buf);
	debug("%s: Size = %zu\n", __func__, bsize);

	/* flush(clean & invalidate) d-cache range buf */
	flush_dcache_range((u32)buf, (u32)buf +
			   roundup(bsize, ARCH_DMA_MINALIGN));

	/* Set up the transfer */
	writel((u32)buf | 1, &devcfg_base->dma_src_addr);
	writel(0xFFFFFFFF, &devcfg_base->dma_dst_addr);
	writel(bsize >> 2, &devcfg_base->dma_src_len);
	writel(0, &devcfg_base->dma_dst_len);

	isr_status = readl(&devcfg_base->int_sts);

	/* Polling the PCAP_INIT status for Set */
	ts = get_timer(0);
	while (!(isr_status & DEVCFG_ISR_DMA_DONE)) {
		if (isr_status & DEVCFG_ISR_ERROR_FLAGS_MASK) {
			debug("%s: Error: isr = 0x%08X\n", __func__,
			      isr_status);
			debug("%s: Write count = 0x%08X\n", __func__,
			      readl(&devcfg_base->write_count));
			debug("%s: Read count = 0x%08X\n", __func__,
			      readl(&devcfg_base->read_count));

			return FPGA_FAIL;
		}
		if (get_timer(ts) > CONFIG_SYS_FPGA_PROG_TIME) {
			printf("%s: Timeout wait for DMA to complete\n",
			       __func__);
			return FPGA_FAIL;
		}
		isr_status = readl(&devcfg_base->int_sts);
	}

	debug("%s: DMA transfer is done\n", __func__);

	/* Check FPGA configuration completion */
	ts = get_timer(0);
	while (!(isr_status & DEVCFG_ISR_PCFG_DONE)) {
		if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
			printf("%s: Timeout wait for FPGA to config\n",
			       __func__);
			return FPGA_FAIL;
		}
		isr_status = readl(&devcfg_base->int_sts);
	}

	debug("%s: FPGA config done\n", __func__);

	/* Clear out the DMA status */
	writel(DEVCFG_ISR_DMA_DONE, &devcfg_base->int_sts);

	if (!partialbit)
		zynq_slcr_devcfg_enable();

	return FPGA_SUCCESS;
}
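
The ALIGN-based test above works because rounding an address up changes nothing exactly when it is already aligned; the driver then copies the bitstream to an aligned buffer if needed. A minimal user-space sketch of that check (the ARCH_DMA_MINALIGN value is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ARCH_DMA_MINALIGN	64	/* assumed cache-line size */

static int is_dma_aligned(uintptr_t addr)
{
	/* aligned iff rounding up to the DMA alignment leaves the address unchanged */
	return addr == ALIGN(addr, (uintptr_t)ARCH_DMA_MINALIGN);
}

int main(void)
{
	printf("%d %d\n", is_dma_aligned(0x1000), is_dma_aligned(0x1004));	/* 1 0 */
	return 0;
}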
Example #5
File: io.c Project: 119-org/hi3518-osdrv
/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on @c->min_io_size boundary) is synchronized.
 * This way we waste less space.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write whole write buffer but write only the minimum necessary
	 * amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
			    sync_len, wbuf->dtype);
	if (err) {
		ubifs_err("cannot write %d bytes to LEB %d:%d",
			  sync_len, wbuf->lnum, wbuf->offs);
		dbg_dump_stack();
		return err;
	}

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
	 * But our goal is to optimize writes and make sure we write in
	 * @c->max_write_size chunks and to @c->max_write_size-aligned offset.
	 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
	 * sure that @wbuf->offs + @wbuf->size is aligned to
	 * @c->max_write_size. This way we make sure that after next
	 * write-buffer flush we are again at the optimal offset (aligned to
	 * @c->max_write_size).
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
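
The sync_len/dirt computation above is just "round the used bytes up to a whole min. I/O unit and pad the difference before writing". A hedged sketch with made-up sizes:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int min_io_size = 512;				/* assumed flash min. I/O unit */
	int used = 700;					/* bytes currently in the write-buffer */
	int sync_len = ALIGN(used, min_io_size);	/* 1024 */
	int dirt = sync_len - used;			/* 324 padding bytes */

	printf("sync_len=%d dirt=%d\n", sync_len, dirt);
	return 0;
}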
Example #6
static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;

	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	unsigned char *tmp_hdr = NULL;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ipv6_esp_hdr*)skb->data;
	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong, check against other impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

	{
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */ 

		pskb_trim(skb, skb->len - alen - padlen - 2);
		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, tmp_hdr, hdr_len);
		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		ret = nexthdr[1];
	}

out:
	kfree(tmp_hdr);
out_nofree:
	return ret;
}
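
Rounding the cipher block size up to 4 lets the length check use a simple mask: with a power-of-two blksize, (elen & (blksize - 1)) is non-zero exactly when elen is not a whole number of blocks. A small sketch of that test (values are illustrative):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int blksize = ALIGN(16, 4);	/* e.g. a 16-byte cipher block, already a multiple of 4 */
	int elen = 160;			/* encrypted payload length, illustration only */

	/* non-zero remainder means the payload is not a whole number of cipher blocks */
	printf("%s\n", (elen <= 0 || (elen & (blksize - 1))) ? "reject" : "ok");
	return 0;
}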
Example #7
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
				    struct iwl_txq *txq,
				    struct iwl_device_cmd *dev_cmd,
				    struct sk_buff *skb,
				    struct iwl_cmd_meta *out_meta,
				    int hdr_len,
				    int tx_cmd_len,
				    bool pad)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb->data + hdr_len,
					tb2_len);
	}

	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
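
TB1 is the remainder of the TX command plus the 802.11 header, rounded up to a dword when padding is requested. A sketch of that length computation (all sizes below are assumptions for illustration):

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define IWL_FIRST_TB_SIZE	20	/* assumed */

int main(void)
{
	int tx_cmd_len = 36, cmd_hdr_len = 4, hdr_len = 26;			/* illustrative sizes */
	int len = tx_cmd_len + cmd_hdr_len + hdr_len - IWL_FIRST_TB_SIZE;	/* 46 */
	int tb1_len = ALIGN(len, 4);						/* 48 */

	printf("len=%d tb1_len=%d\n", len, tb1_len);
	return 0;
}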
Example #8
h264enc *h264enc_new(const struct h264enc_params *p)
{
	h264enc *c;
	int i;

	/* check parameter validity */
	if (!IS_ALIGNED(p->src_width, 16) || !IS_ALIGNED(p->src_height, 16) ||
		!IS_ALIGNED(p->width, 2) || !IS_ALIGNED(p->height, 2) ||
		p->width > p->src_width || p->height > p->src_height)
	{
		MSG("invalid picture size");
		return NULL;
	}

	if (p->qp == 0 || p->qp > 47)
	{
		MSG("invalid QP");
		return NULL;
	}

	if (p->src_format != H264_FMT_NV12 && p->src_format != H264_FMT_NV16)
	{
		MSG("invalid color format");
		return NULL;
	}

	/* allocate memory for h264enc structure */
	c = calloc(1, sizeof(*c));
	if (c == NULL)
	{
		MSG("can't allocate h264enc data");
		return NULL;
	}

	/* copy parameters */
	c->mb_width = DIV_ROUND_UP(p->width, 16);
	c->mb_height = DIV_ROUND_UP(p->height, 16);
	c->mb_stride = p->src_width / 16;

	c->crop_right = (c->mb_width * 16 - p->width) / 2;
	c->crop_bottom = (c->mb_height * 16 - p->height) / 2;

	c->profile_idc = p->profile_idc;
	c->level_idc = p->level_idc;

	c->entropy_coding_mode_flag = p->entropy_coding_mode ? 1 : 0;
	c->pic_init_qp = p->qp;
	c->keyframe_interval = p->keyframe_interval;

	c->write_sps_pps = 1;
	c->current_frame_num = 0;

	/* allocate input buffer */
	c->input_color_format = p->src_format;
	switch (c->input_color_format)
	{
	case H264_FMT_NV12:
		c->input_buffer_size = p->src_width * (p->src_height + p->src_height / 2);
		break;
	case H264_FMT_NV16:
		c->input_buffer_size = p->src_width * p->src_height * 2;
		break;
	}

	c->luma_buffer = ve_malloc(c->input_buffer_size);
	if (c->luma_buffer == NULL)
		goto nomem;

	c->chroma_buffer = c->luma_buffer + p->src_width * p->src_height;

	/* allocate bytestream output buffer */
	c->bytestream_buffer_size = 1 * 1024 * 1024;
	c->bytestream_buffer = ve_malloc(c->bytestream_buffer_size);
	if (c->bytestream_buffer == NULL)
		goto nomem;

	/* allocate reference picture memory */
	unsigned int luma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 16, 32);
	unsigned int chroma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 8, 32);
	for (i = 0; i < 2; i++)
	{
		c->ref_picture[i].luma_buffer = ve_malloc(luma_size + chroma_size);
		c->ref_picture[i].chroma_buffer = c->ref_picture[i].luma_buffer + luma_size;
		c->ref_picture[i].extra_buffer = ve_malloc(luma_size / 4);
		if (c->ref_picture[i].luma_buffer == NULL || c->ref_picture[i].extra_buffer == NULL)
			goto nomem;
	}

	/* allocate unknown purpose buffers */
	c->extra_buffer_frame = ve_malloc(ALIGN(c->mb_width, 4) * c->mb_height * 8);
	c->extra_buffer_line = ve_malloc(c->mb_width * 32);
	if (c->extra_buffer_frame == NULL || c->extra_buffer_line == NULL)
		goto nomem;

	return c;

nomem:
	MSG("can't allocate VE memory");
	h264enc_free(c);
	return NULL;
}
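
As a worked illustration of the macroblock and crop arithmetic above (the picture size is a made-up example, not taken from the original code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned width = 1920, height = 1080;		/* hypothetical picture size */
	unsigned mb_width = DIV_ROUND_UP(width, 16);	/* 120 */
	unsigned mb_height = DIV_ROUND_UP(height, 16);	/* 68: 1080 is not a multiple of 16 */
	unsigned crop_bottom = (mb_height * 16 - height) / 2;	/* (1088 - 1080) / 2 = 4 */

	printf("%u x %u macroblocks, crop_bottom = %u\n",
	       mb_width, mb_height, crop_bottom);
	return 0;
}
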
Example #9
/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}
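
The fw_size accumulation above rounds each ucode image up to a whole number of pages. A small worked example with a hypothetical image size (PAGE_SIZE assumed to be 4096):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned ucode_size_bytes = 193236;	/* made-up image size */
	unsigned reserved = ALIGN(ucode_size_bytes, PAGE_SIZE);

	printf("%u bytes -> %u bytes (%u pages)\n",
	       ucode_size_bytes, reserved, reserved / PAGE_SIZE);
	return 0;
}
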
Example #10
static int wl1271_boot_upload_nvs(struct wl1271 *wl)
{
	size_t nvs_len, burst_len;
	int i;
	u32 dest_addr, val;
	u8 *nvs_ptr, *nvs, *nvs_aligned;

	nvs = wl->nvs;
	if (nvs == NULL)
		return -ENODEV;

	nvs_ptr = nvs;

	nvs_len = wl->nvs_len;

	/* Update the device MAC address into the nvs */
	nvs[11] = wl->mac_addr[0];
	nvs[10] = wl->mac_addr[1];
	nvs[6] = wl->mac_addr[2];
	nvs[5] = wl->mac_addr[3];
	nvs[4] = wl->mac_addr[4];
	nvs[3] = wl->mac_addr[5];

	/*
	 * Layout before the actual NVS tables:
	 * 1 byte : burst length.
	 * 2 bytes: destination address.
	 * n bytes: data to burst copy.
	 *
	 * This is ended by a 0 length, then the NVS tables.
	 */

	/* FIXME: Do we need to check here whether the LSB is 1? */
	while (nvs_ptr[0]) {
		burst_len = nvs_ptr[0];
		dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));

		/* FIXME: Due to our new wl1271_translate_reg_addr function,
		   we need to add the REGISTER_BASE to the destination */
		dest_addr += REGISTERS_BASE;

		/* We move our pointer to the data */
		nvs_ptr += 3;

		for (i = 0; i < burst_len; i++) {
			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));

			wl1271_debug(DEBUG_BOOT,
				     "nvs burst write 0x%x: 0x%x",
				     dest_addr, val);
			wl1271_reg_write32(wl, dest_addr, val);

			nvs_ptr += 4;
			dest_addr += 4;
		}
	}

	/*
	 * We've reached the first zero length, the first NVS table
	 * is 7 bytes further.
	 */
	nvs_ptr += 7;
	nvs_len -= nvs_ptr - nvs;
	nvs_len = ALIGN(nvs_len, 4);

	/* FIXME: The driver sets the partition here, but this is not needed,
	   since it sets to the same one as currently in use */
	/* Now we must set the partition correctly */
	wl1271_set_partition(wl,
			     part_table[PART_WORK].mem.start,
			     part_table[PART_WORK].mem.size,
			     part_table[PART_WORK].reg.start,
			     part_table[PART_WORK].reg.size);

	/* Copy the NVS tables to a new block to ensure alignment */
	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);

	/* And finally we upload the NVS tables */
	/* FIXME: In wl1271, we upload everything at once.
	   No endianness handling needed here?! The ref driver doesn't do
	   anything about it at this point */
	wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len);

	kfree(nvs_aligned);
	return 0;
}
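
A standalone sketch of the pre-NVS burst layout described in the comment above (1-byte burst length, 2-byte destination, then that many 32-bit little-endian words, terminated by a zero length); the buffer contents are invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* one burst of two words to address 0x01f4, then the terminating zero length */
	uint8_t nvs[] = {
		0x02, 0xf4, 0x01,
		0x11, 0x22, 0x33, 0x44,
		0x55, 0x66, 0x77, 0x88,
		0x00
	};
	uint8_t *p = nvs;

	while (p[0]) {
		unsigned burst_len = p[0];
		uint32_t dest = (p[1] & 0xfe) | ((uint32_t)p[2] << 8);

		p += 3;	/* skip the burst header, p now points at the data */
		for (unsigned i = 0; i < burst_len; i++, p += 4, dest += 4) {
			uint32_t val = p[0] | (p[1] << 8) |
				       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
			printf("write 0x%08x to 0x%04x\n", (unsigned)val, (unsigned)dest);
		}
	}
	return 0;
}
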
Example #11
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
						      8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = dev->mode_config.fb_base;
	if (!IS_GEN2(dev))
		info->aperture_size = pci_resource_len(dev->pdev, 2);
	else
		info->aperture_size = pci_resource_len(dev->pdev, 0);

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);


	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
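
The two ALIGN() calls above round the scanline pitch to 64 bytes and the buffer size to a whole page. A worked example with made-up mode values:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE 4096u

int main(void)
{
	unsigned width = 1366, height = 720, bpp = 32;		/* hypothetical mode */
	unsigned pitch = ALIGN(width * ((bpp + 7) / 8), 64);	/* 5464 -> 5504 */
	unsigned size = ALIGN(pitch * height, PAGE_SIZE);	/* 3962880 -> 3964928 */

	printf("pitch %u, size %u\n", pitch, size);
	return 0;
}
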
Example #12
static int do_iommu_domain_map(struct hisi_iommu_domain *hisi_domain, struct scatterlist *sgl,
		struct iommu_map_format *format, struct map_result *result)
{
	int ret;
	unsigned long phys_len, iova_size;
	unsigned long iova_start;

	struct gen_pool *pool;
	struct iommu_domain *domain;
	struct scatterlist *sg;
	struct tile_format fmt;
	/* calculate whole phys mem length */
	for (phys_len = 0, sg = sgl; sg; sg = sg_next(sg)) {
		phys_len += (unsigned long)ALIGN(sg->length, PAGE_SIZE);
	}

	/* get io virtual address size */
	if (format->is_tile) {
		unsigned long lines;
		unsigned long body_size;
		body_size = phys_len - format->header_size;
		lines = body_size / (format->phys_page_line * PAGE_SIZE);

		/* the header needs extra lines of virtual space */
		if (format->header_size) {
			unsigned long header_size;
			header_size = ALIGN(format->header_size, format->virt_page_line * PAGE_SIZE);
			lines += header_size / (format->virt_page_line * PAGE_SIZE);
		}

		iova_size = lines * format->virt_page_line * PAGE_SIZE;
	} else {
		iova_size = phys_len;
	}

	/* alloc iova */
	pool = hisi_domain->iova_pool;
	domain = hisi_domain->domain;
	iova_start = hisi_alloc_iova(pool, iova_size, hisi_domain->range.align);
	if (!iova_start) {
		printk("[%s]hisi_alloc_iova alloc 0x%lx failed!\n", __func__, iova_size);
		printk("[%s]dump iova pool begin --------------------------\n", __func__);
		printk("iova available: 0x%x\n", (unsigned int)hisi_iommu_iova_available());
		printk("alloc count: %d, free count: %d\n",
				dbg_inf.alloc_iova_count, dbg_inf.free_iova_count);
		printk("[%s]dump iova pool end   --------------------------\n", __func__);
		return -EINVAL;
	}

	if (0x100000000 < (iova_start + iova_size)) {
		pr_err("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! "
				"hisi iommu can not deal with iova 0x%lx size 0x%lx\n",
				iova_start, iova_size);
	}

	/* do map */
	if (format->is_tile) {
		fmt.is_tile = format->is_tile;
		fmt.phys_page_line = format->phys_page_line;
		fmt.virt_page_line = format->virt_page_line;
		fmt.header_size = format->header_size;
		ret = iommu_map_tile(domain, iova_start, sgl, iova_size, 0, &fmt);
	} else {
		ret = iommu_map_range(domain, iova_start, sgl, (size_t)iova_size, format->prot);
	}

	if (ret) {
		printk(KERN_ERR "[%s]map failed!\n", __func__);
		hisi_free_iova(pool, iova_start, iova_size);
		return ret;
	} else {
		/* out put result */
		result->iova_start = iova_start;
		result->iova_size = iova_size;
	}
	return 0;
}
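
For the tiled case above, the virtual size is derived from the number of physical lines, each virtual line being wider than a physical one. A worked example with invented tiling parameters:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* hypothetical format: 8 physical pages per line, 16 virtual pages per line */
	unsigned long phys_page_line = 8, virt_page_line = 16;
	unsigned long header_size = 8192, phys_len = 1024 * 1024;

	unsigned long body_size = phys_len - header_size;
	unsigned long lines = body_size / (phys_page_line * PAGE_SIZE);	/* 31 */

	if (header_size) {
		unsigned long h = ALIGN(header_size, virt_page_line * PAGE_SIZE);
		lines += h / (virt_page_line * PAGE_SIZE);	/* +1 line for the header */
	}

	printf("iova_size = 0x%lx\n", lines * virt_page_line * PAGE_SIZE);	/* 0x200000 */
	return 0;
}
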
Example #13
int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode)
{
	struct mdss_mdp_format_params *fmt;
	int i, rc;
	u32 bpp, ystride0_off, ystride1_off;
	if (ps == NULL)
		return -EINVAL;

	if ((w > MAX_IMG_WIDTH) || (h > MAX_IMG_HEIGHT))
		return -ERANGE;

	fmt = mdss_mdp_get_format_params(format);
	if (!fmt)
		return -EINVAL;

	bpp = fmt->bpp;
	memset(ps, 0, sizeof(struct mdss_mdp_plane_sizes));

	if (bwc_mode) {
		rc = mdss_mdp_get_rau_strides(w, h, fmt, ps);
		if (rc)
			return rc;
		ystride0_off = DIV_ROUND_UP(h, ps->rau_h[0]);
		ystride1_off = DIV_ROUND_UP(h, ps->rau_h[1]);
		ps->plane_size[0] = (ps->ystride[0] * ystride0_off) +
				    (ps->ystride[1] * ystride1_off);
		ps->ystride[0] += ps->ystride[1];
		ps->ystride[1] = 2;
		ps->plane_size[1] = ps->rau_cnt * ps->ystride[1] *
				   (ystride0_off + ystride1_off);
	} else {
		if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
			ps->num_planes = 1;
			ps->plane_size[0] = w * h * bpp;
			ps->ystride[0] = w * bpp;
		} else if (format == MDP_Y_CBCR_H2V2_VENUS) {
			int cf = COLOR_FMT_NV12;
			ps->num_planes = 2;
			ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
			ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
			ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
				ps->ystride[0];
			ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
				ps->ystride[1];
		} else {
			u8 hmap[] = { 1, 2, 1, 2 };
			u8 vmap[] = { 1, 1, 2, 2 };
			u8 horiz, vert, stride_align, height_align;

			horiz = hmap[fmt->chroma_sample];
			vert = vmap[fmt->chroma_sample];

			switch (format) {
			case MDP_Y_CR_CB_GH2V2:
				stride_align = 16;
				height_align = 1;
				break;
			default:
				stride_align = 1;
				height_align = 1;
				break;
			}

			ps->ystride[0] = ALIGN(w, stride_align);
			ps->ystride[1] = ALIGN(w / horiz, stride_align);
			ps->plane_size[0] = ps->ystride[0] *
				ALIGN(h, height_align);
			ps->plane_size[1] = ps->ystride[1] * (h / vert);

			if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
				ps->num_planes = 2;
				ps->plane_size[1] *= 2;
				ps->ystride[1] *= 2;
			} else { /* planar */
				ps->num_planes = 3;
				ps->plane_size[2] = ps->plane_size[1];
				ps->ystride[2] = ps->ystride[1];
			}
		}
	}
	for (i = 0; i < ps->num_planes; i++)
		ps->total_size += ps->plane_size[i];

	return 0;
}
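
A worked pass through the pseudo-planar branch above for a hypothetical 176x144 surface with 4:2:0 chroma subsampling and no extra stride alignment:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned w = 176, h = 144;		/* hypothetical surface */
	unsigned horiz = 2, vert = 2;		/* 4:2:0 chroma subsampling */
	unsigned stride_align = 1, height_align = 1;

	unsigned ystride0 = ALIGN(w, stride_align);		/* 176 */
	unsigned ystride1 = ALIGN(w / horiz, stride_align);	/* 88 */
	unsigned plane0 = ystride0 * ALIGN(h, height_align);	/* 25344 */
	unsigned plane1 = ystride1 * (h / vert);		/* 6336 */

	/* pseudo-planar: interleaved Cb/Cr doubles the chroma stride and size */
	plane1 *= 2;
	ystride1 *= 2;

	printf("total = %u bytes\n", plane0 + plane1);	/* 38016 = w * h * 3 / 2 */
	return 0;
}
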
Example #14
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
					   uint64_t max_addr,
					   uint64_t alignment,
					   char *name,
					   uint32_t flags)
{
	int64_t addr_allocated;
	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
		     (unsigned long long)size,
		     (unsigned long long)min_addr,
		     (unsigned long long)max_addr,
		     (unsigned long long)alignment,
		     name);
#endif
	if (cvmx_bootmem_desc->major_version != 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
			     "%d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return -1;
	}

	/*
	 * Take lock here, as name lookup/block alloc/name add need to
	 * be atomic.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));

	/* Get pointer to first available named block descriptor */
	named_block_desc_ptr =
		cvmx_bootmem_phy_named_block_find(NULL,
						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);

	/*
	 * Check to see if name already in use, return error if name
	 * not available or no more room for blocks.
	 */
	if (cvmx_bootmem_phy_named_block_find(name,
					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
		return -1;
	}


	/*
	 * Round size up to a multiple of the minimum alignment bytes.
	 * We need the actual size allocated to allow for blocks to be
	 * coalesced when they are freed.  The alloc routine does the
	 * same rounding up on all allocations.
	 */
	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);

	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
						alignment,
						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
	if (addr_allocated >= 0) {
		named_block_desc_ptr->base_addr = addr_allocated;
		named_block_desc_ptr->size = size;
		strncpy(named_block_desc_ptr->name, name,
			cvmx_bootmem_desc->named_block_name_len);
		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
	}

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
	return addr_allocated;
}
Example #15
File: big_key.c Project: AK101111/linux
/*
 * Preparse a big key
 */
int big_key_preparse(struct key_preparsed_payload *prep)
{
	struct path *path = (struct path *)&prep->payload.data[big_key_path];
	struct file *file;
	u8 *enckey;
	u8 *data = NULL;
	ssize_t written;
	size_t datalen = prep->datalen;
	int ret;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
		goto error;

	/* Set an arbitrary quota */
	prep->quotalen = 16;

	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		/* Create a shmem file to store the data in.  This will permit the data
		 * to be swapped out if needed.
		 *
		 * File content is stored encrypted with a randomly generated key.
		 */
		size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));

		/* prepare aligned data to encrypt */
		data = kmalloc(enclen, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		memcpy(data, prep->data, datalen);
		memset(data + datalen, 0x00, enclen - datalen);

		/* generate random key */
		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
		if (!enckey) {
			ret = -ENOMEM;
			goto error;
		}

		ret = big_key_gen_enckey(enckey);
		if (ret)
			goto err_enckey;

		/* encrypt aligned data */
		ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
		if (ret)
			goto err_enckey;

		/* save aligned data to file */
		file = shmem_kernel_file_setup("", enclen, 0);
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto err_enckey;
		}

		written = kernel_write(file, data, enclen, 0);
		if (written != enclen) {
			ret = written;
			if (written >= 0)
				ret = -ENOMEM;
			goto err_fput;
		}

		/* Pin the mount and dentry to the key so that we can open it again
		 * later
		 */
		prep->payload.data[big_key_data] = enckey;
		*path = file->f_path;
		path_get(path);
		fput(file);
		kfree(data);
	} else {
		/* Just store the data in a buffer */
		void *data = kmalloc(datalen, GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		prep->payload.data[big_key_data] = data;
		memcpy(data, prep->data, prep->datalen);
	}
	return 0;

err_fput:
	fput(file);
err_enckey:
	kfree(enckey);
error:
	kfree(data);
	return ret;
}
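
A small worked example of the enclen rounding used above (the 16-byte cipher block size is an assumption for illustration):

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t datalen = 3000, blocksize = 16;		/* hypothetical payload and block size */
	size_t enclen = ALIGN(datalen, blocksize);	/* 3008 */

	unsigned char buf[3008];
	memset(buf, 0xAA, datalen);			/* pretend payload */
	memset(buf + datalen, 0x00, enclen - datalen);	/* 8 bytes of zero padding */

	printf("datalen %zu -> enclen %zu (%zu pad bytes)\n",
	       datalen, enclen, enclen - datalen);
	return 0;
}
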
Example #16
/* Upload a new set of constants.  Too much variability to go into the
 * cache mechanism, but maybe would benefit from a comparison against
 * the current uploaded set of constants.
 */
static void prepare_constant_buffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   const struct brw_vertex_program *vp =
      brw_vertex_program_const(brw->vertex_program);
   const GLuint sz = brw->curbe.total_size;
   const GLuint bufsz = sz * 16 * sizeof(GLfloat);
   GLfloat *buf;
   GLuint i;

   if (sz == 0) {
      brw->curbe.last_bufsz  = 0;
      return;
   }

   buf = brw->curbe.next_buf;

   /* fragment shader constants */
   if (brw->curbe.wm_size) {
      GLuint offset = brw->curbe.wm_start * 16;

      /* copy float constants */
      for (i = 0; i < brw->wm.prog_data->nr_params; i++) {
	 buf[offset + i] = convert_param(brw->wm.prog_data->param_convert[i],
					 *brw->wm.prog_data->param[i]);
      }
   }


   /* The clipplanes are actually delivered to both CLIP and VS units.
    * VS uses them to calculate the outcode bitmasks.
    */
   if (brw->curbe.clip_size) {
      GLuint offset = brw->curbe.clip_start * 16;
      GLuint j;

      /* If any planes are going this way, send them all this way:
       */
      for (i = 0; i < 6; i++) {
	 buf[offset + i * 4 + 0] = fixed_plane[i][0];
	 buf[offset + i * 4 + 1] = fixed_plane[i][1];
	 buf[offset + i * 4 + 2] = fixed_plane[i][2];
	 buf[offset + i * 4 + 3] = fixed_plane[i][3];
      }

      /* Clip planes: _NEW_TRANSFORM plus _NEW_PROJECTION to get to
       * clip-space:
       */
      assert(MAX_CLIP_PLANES == 6);
      for (j = 0; j < MAX_CLIP_PLANES; j++) {
	 if (ctx->Transform.ClipPlanesEnabled & (1<<j)) {
	    buf[offset + i * 4 + 0] = ctx->Transform._ClipUserPlane[j][0];
	    buf[offset + i * 4 + 1] = ctx->Transform._ClipUserPlane[j][1];
	    buf[offset + i * 4 + 2] = ctx->Transform._ClipUserPlane[j][2];
	    buf[offset + i * 4 + 3] = ctx->Transform._ClipUserPlane[j][3];
	    i++;
	 }
      }
   }

   /* vertex shader constants */
   if (brw->curbe.vs_size) {
      GLuint offset = brw->curbe.vs_start * 16;
      GLuint nr = brw->vs.prog_data->nr_params / 4;

      /* Load the subset of push constants that will get used when
       * we also have a pull constant buffer.
       */
      for (i = 0; i < vp->program.Base.Parameters->NumParameters; i++) {
	 if (brw->vs.constant_map[i] != -1) {
	    assert(brw->vs.constant_map[i] <= nr);
	    memcpy(buf + offset + brw->vs.constant_map[i] * 4,
		   vp->program.Base.Parameters->ParameterValues[i],
		   4 * sizeof(float));
	 }
      }
   }

   if (0) {
      for (i = 0; i < sz*16; i+=4) 
	 printf("curbe %d.%d: %f %f %f %f\n", i/8, i&4,
		buf[i+0], buf[i+1], buf[i+2], buf[i+3]);

      printf("last_buf %p buf %p sz %d/%d cmp %d\n",
	     brw->curbe.last_buf, buf,
	     bufsz, brw->curbe.last_bufsz,
	     brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);
   }

   if (brw->curbe.curbe_bo != NULL &&
       bufsz == brw->curbe.last_bufsz &&
       memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {
      /* constants have not changed */
   } else {
      /* Update the record of what our last set of constants was.  We
       * don't just flip the pointers because we don't fill in the
       * data in the padding between the entries.
       */
      memcpy(brw->curbe.last_buf, buf, bufsz);
      brw->curbe.last_bufsz = bufsz;

      if (brw->curbe.curbe_bo != NULL &&
	  brw->curbe.curbe_next_offset + bufsz > brw->curbe.curbe_bo->size)
      {
	 drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
	 drm_intel_bo_unreference(brw->curbe.curbe_bo);
	 brw->curbe.curbe_bo = NULL;
      }

      if (brw->curbe.curbe_bo == NULL) {
	 /* Allocate a single page for CURBE entries for this batchbuffer.
	  * They're generally around 64b.
	  */
	 brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->intel.bufmgr, "CURBE",
						  4096, 1 << 6);
	 brw->curbe.curbe_next_offset = 0;
	 drm_intel_gem_bo_map_gtt(brw->curbe.curbe_bo);
	 assert(bufsz < 4096);
      }

      brw->curbe.curbe_offset = brw->curbe.curbe_next_offset;
      brw->curbe.curbe_next_offset += bufsz;
      brw->curbe.curbe_next_offset = ALIGN(brw->curbe.curbe_next_offset, 64);

      /* Copy data to the buffer:
       */
      memcpy(brw->curbe.curbe_bo->virtual + brw->curbe.curbe_offset,
	     buf,
	     bufsz);
   }

   brw_add_validated_bo(brw, brw->curbe.curbe_bo);

   /* Because this provokes an action (ie copy the constants into the
    * URB), it shouldn't be shortcircuited if identical to the
    * previous time - because eg. the urb destination may have
    * changed, or the urb contents different to last time.
    *
    * Note that the data referred to is actually copied internally,
    * not just used in place according to passed pointer.
    *
    * It appears that the CS unit takes care of using each available
    * URB entry (Const URB Entry == CURBE) in turn, and issuing
    * flushes as necessary when doublebuffering of CURBEs isn't
    * possible.
    */
}
Example #17
File: smbios.c Project: MattDevo/coreboot
unsigned long smbios_write_tables(unsigned long current)
{
	struct smbios_entry *se;
	unsigned long tables;
	int len = 0;
	int max_struct_size = 0;
	int handle = 0;

	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "%s: %08lx\n", __func__, current);

	se = (struct smbios_entry *)current;
	current += sizeof(struct smbios_entry);
	current = ALIGN(current, 16);

	tables = current;
	update_max(len, max_struct_size, smbios_write_type0(&current,
		handle++));
	update_max(len, max_struct_size, smbios_write_type1(&current,
		handle++));
	update_max(len, max_struct_size, smbios_write_type2(&current,
		handle, handle + 1)); /* The chassis handle is the next one */
	handle++;
	update_max(len, max_struct_size, smbios_write_type3(&current,
		handle++));
	update_max(len, max_struct_size, smbios_write_type4(&current,
		handle++));
	update_max(len, max_struct_size, smbios_write_type11(&current,
		&handle));
	if (IS_ENABLED(CONFIG_ELOG))
		update_max(len, max_struct_size,
			elog_smbios_write_type15(&current, handle++));
	update_max(len, max_struct_size, smbios_write_type16(&current,
		handle++));
	update_max(len, max_struct_size, smbios_write_type17(&current,
		&handle));
	update_max(len, max_struct_size, smbios_write_type19(&current,
		handle++));
	update_max(len, max_struct_size, smbios_write_type20(&current,
		&handle));
	update_max(len, max_struct_size, smbios_write_type32(&current,
		handle++));

	update_max(len, max_struct_size, smbios_walk_device_tree(all_devices,
		&handle, &current));

	update_max(len, max_struct_size, smbios_write_type127(&current,
		handle++));

	memset(se, 0, sizeof(struct smbios_entry));
	memcpy(se->anchor, "_SM_", 4);
	se->length = sizeof(struct smbios_entry);
	se->major_version = 2;
	se->minor_version = 7;
	se->max_struct_size = max_struct_size;
	se->struct_count = handle;
	se->smbios_bcd_revision = 0x27;
	memcpy(se->intermediate_anchor_string, "_DMI_", 5);

	se->struct_table_address = (u32)tables;
	se->struct_table_length = len;

	se->intermediate_checksum = smbios_checksum((u8 *)se + 0x10,
						    sizeof(struct smbios_entry)
						    - 0x10);
	se->checksum = smbios_checksum((u8 *)se, sizeof(struct smbios_entry));
	return current;
}
Example #18
static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}
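
The helper above rounds the address just past the dm_crypt_request up to the cipher's alignment so the IV that follows it is suitably aligned. A user-space sketch of the same pointer trick (the 16-byte alignment stands in for crypto_ablkcipher_alignmask() + 1):

#include <stdio.h>
#include <stdlib.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

struct dmreq_stub { int fields[3]; };	/* stand-in for struct dm_crypt_request */

int main(void)
{
	unsigned long align = 16;	/* assumed cipher alignment */
	struct dmreq_stub *dmreq = malloc(sizeof(*dmreq) + 64);

	unsigned char *iv = (unsigned char *)ALIGN((unsigned long)(dmreq + 1), align);
	printf("end of dmreq: %p, aligned IV: %p\n", (void *)(dmreq + 1), (void *)iv);

	free(dmreq);
	return 0;
}
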
Example #19
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int hdr_len;
	struct ipv6hdr *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	esp = x->data;
	hdr_len = skb->h.raw - skb->data +
		  sizeof(*esph) + esp->conf.ivlen;

	/* Strip IP+ESP header. */
	__skb_pull(skb, hdr_len);

	/* Now skb is pure payload to encrypt */
	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
	esph = (struct ipv6_esp_hdr *)skb->h.raw;
	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
	*(u8*)(trailer->tail - 1) = *skb->nh.raw;
	*skb->nh.raw = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	err = 0;

error:
	return err;
}
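
The ALIGN() calls above size the ESP trailer: the payload plus the two trailer bytes (pad length and next header) is rounded up to the cipher block size. A worked example with hypothetical sizes:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned payload = 37, blksize = 16;		/* hypothetical payload and block size */
	unsigned clen = ALIGN(payload + 2, blksize);	/* 39 -> 48 */
	unsigned padlen = clen - payload - 2;		/* 9 pad bytes before the trailer */

	printf("clen %u, %u pad bytes\n", clen, padlen);
	return 0;
}
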
Example #20
File: xvmalloc.c Project: 3null/fastsocket
/**
 * xv_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @page: page no. that holds the object
 * @offset: location of object within page
 *
 * On success, <page, offset> identifies block allocated
 * and 0 is returned. On failure, <page, offset> is set to
 * 0 and -ENOMEM is returned.
 *
 * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
 */
int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
		u32 *offset, gfp_t flags)
{
	int error;
	u32 index, tmpsize, origsize, tmpoffset;
	struct block_header *block, *tmpblock;

	*page = NULL;
	*offset = 0;
	origsize = size;

	if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
		return -ENOMEM;

	size = ALIGN(size, XV_ALIGN);

	spin_lock(&pool->lock);

	index = find_block(pool, size, page, offset);

	if (!*page) {
		spin_unlock(&pool->lock);
		if (flags & GFP_NOWAIT)
			return -ENOMEM;
		error = grow_pool(pool, flags);
		if (unlikely(error))
			return error;

		spin_lock(&pool->lock);
		index = find_block(pool, size, page, offset);
	}

	if (!*page) {
		spin_unlock(&pool->lock);
		return -ENOMEM;
	}

	block = get_ptr_atomic(*page, *offset, KM_USER0);

	remove_block(pool, *page, *offset, block, index);

	/* Split the block if required */
	tmpoffset = *offset + size + XV_ALIGN;
	tmpsize = block->size - size;
	tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
	if (tmpsize) {
		tmpblock->size = tmpsize - XV_ALIGN;
		set_flag(tmpblock, BLOCK_FREE);
		clear_flag(tmpblock, PREV_FREE);

		set_blockprev(tmpblock, *offset);
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			insert_block(pool, *page, tmpoffset, tmpblock);

		if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
			tmpblock = BLOCK_NEXT(tmpblock);
			set_blockprev(tmpblock, tmpoffset);
		}
	} else {
		/* This block is exact fit */
		if (tmpoffset != PAGE_SIZE)
			clear_flag(tmpblock, PREV_FREE);
	}

	block->size = origsize;
	clear_flag(block, BLOCK_FREE);

	put_ptr_atomic(block, KM_USER0);
	spin_unlock(&pool->lock);

	*offset += XV_ALIGN;

	return 0;
}
Example #21
unsigned long write_acpi_tables(unsigned long start)
{
	unsigned long current;
	acpi_rsdp_t *rsdp;
	acpi_rsdt_t *rsdt;
	acpi_hpet_t *hpet;
	acpi_madt_t *madt;
	acpi_srat_t *srat;
	acpi_slit_t *slit;
	acpi_fadt_t *fadt;
	acpi_facs_t *facs;
	acpi_header_t *dsdt;
	acpi_header_t *ssdt;
	acpi_header_t *ssdtx;
	void *p;

	int i;

	get_bus_conf(); //it will get sblk, pci1234, hcdn, and sbdn

	/* Align ACPI tables to 16 bytes */
	start = ALIGN(start, 16);
	current = start;

	printk(BIOS_INFO, "ACPI: Writing ACPI tables at %lx...\n", start);

	/* We need at least an RSDP and an RSDT Table */
	rsdp = (acpi_rsdp_t *) current;
	current += sizeof(acpi_rsdp_t);
	rsdt = (acpi_rsdt_t *) current;
	current += sizeof(acpi_rsdt_t);

	/* clear all table memory */
	memset((void *)start, 0, current - start);

	acpi_write_rsdp(rsdp, rsdt, NULL);
	acpi_write_rsdt(rsdt);

	/* FACS */
	printk(BIOS_DEBUG, "ACPI:    * FACS\n");
	facs = (acpi_facs_t *) current;
	current += sizeof(acpi_facs_t);
	acpi_create_facs(facs);

	/* DSDT */
	printk(BIOS_DEBUG, "ACPI:    * DSDT at %lx\n", current);
	dsdt = (acpi_header_t *)current;
	memcpy(dsdt, &AmlCode, sizeof(acpi_header_t));
	current += dsdt->length;
	memcpy(dsdt, &AmlCode, dsdt->length);
	printk(BIOS_DEBUG, "ACPI:    * DSDT @ %p Length %x\n", dsdt, dsdt->length);

	/* FADT */
	printk(BIOS_DEBUG, "ACPI:    * FADT at %lx\n", current);
	fadt = (acpi_fadt_t *) current;
	current += sizeof(acpi_fadt_t);

	acpi_create_fadt(fadt, facs, dsdt);
	acpi_add_table(rsdp, fadt);

	/*
	 * We explicitly add these tables later on:
	 */
	printk(BIOS_DEBUG, "ACPI:    * HPET at %lx\n", current);
	hpet = (acpi_hpet_t *) current;
	current += sizeof(acpi_hpet_t);
	acpi_create_hpet(hpet);
	acpi_add_table(rsdp, hpet);

	/* If we want to use HPET Timers Linux wants an MADT */
	printk(BIOS_DEBUG, "ACPI:    * MADT at %lx\n", current);
	madt = (acpi_madt_t *) current;
	acpi_create_madt(madt);
	current += madt->header.length;
	acpi_add_table(rsdp, madt);

	/* SRAT */
	printk(BIOS_DEBUG, "ACPI:    * SRAT at %lx\n", current);
	srat = (acpi_srat_t *) current;
	acpi_create_srat(srat);
	current += srat->header.length;
	acpi_add_table(rsdp, srat);

	/* SLIT */
	printk(BIOS_DEBUG, "ACPI:   * SLIT at %lx\n", current);
	slit = (acpi_slit_t *) current;
	acpi_create_slit(slit);
	current += slit->header.length;
	acpi_add_table(rsdp, slit);

	/* SSDT */
	printk(BIOS_DEBUG, "ACPI:    * SSDT at %lx\n", current);
	ssdt = (acpi_header_t *)current;

	acpi_create_ssdt_generator(ssdt, ACPI_TABLE_CREATOR);
	current += ssdt->length;
	acpi_add_table(rsdp, ssdt);

#if CONFIG_ACPI_SSDTX_NUM >= 1

	// same HT I/O, but at a different position? We may have to copy, change HCIN, and recalculate the checksum before add_table

	for (i = 1; i < sysconf.hc_possible_num; i++) {  // 0: is hc sblink
		if ((sysconf.pci1234[i] & 1) != 1)
			continue;
		u8 c;
		if (i < 7) {
			c = (u8) ('4' + i - 1);
		} else {
			c = (u8) ('A' + i - 1 - 6);
		}
		current = ALIGN(current, 8);
		printk(BIOS_DEBUG, "ACPI:    * SSDT for PCI%c Aka hcid = %d\n", c, sysconf.hcid[i]); //pci0 and pci1 are in dsdt
		ssdtx = (acpi_header_t *)current;
		switch (sysconf.hcid[i]) {
		case 1: //8132
			p = &AmlCode_ssdt2;
			break;
		case 2: //8151
			p = &AmlCode_ssdt3;
			break;
		case 3: //8131
			p = &AmlCode_ssdt4;
			break;
		default:
			continue;
		}
		memcpy(ssdtx, p, sizeof(acpi_header_t));
		current += ssdtx->length;
		memcpy(ssdtx, p, ssdtx->length);
		update_ssdtx((void *)ssdtx, i);
		ssdtx->checksum = 0;
		ssdtx->checksum = acpi_checksum((u8 *)ssdtx, ssdtx->length);
		acpi_add_table(rsdp, ssdtx);
	}
#endif

	printk(BIOS_INFO, "ACPI: done.\n");
	return current;
}
Example #22
File: xvmalloc.c Project: 3null/fastsocket
/*
 * Free block identified with <page, offset>
 */
void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
{
	void *page_start;
	struct block_header *block, *tmpblock;

	offset -= XV_ALIGN;

	spin_lock(&pool->lock);

	page_start = get_ptr_atomic(page, 0, KM_USER0);
	block = (struct block_header *)((char *)page_start + offset);

	/* Catch double free bugs */
	BUG_ON(test_flag(block, BLOCK_FREE));

	block->size = ALIGN(block->size, XV_ALIGN);

	tmpblock = BLOCK_NEXT(block);
	if (offset + block->size + XV_ALIGN == PAGE_SIZE)
		tmpblock = NULL;

	/* Merge next block if it's free */
	if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
		/*
		 * Blocks smaller than XV_MIN_ALLOC_SIZE
		 * are not inserted in any free list.
		 */
		if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
			remove_block(pool, page,
				    offset + block->size + XV_ALIGN, tmpblock,
				    get_index_for_insert(tmpblock->size));
		}
		block->size += tmpblock->size + XV_ALIGN;
	}

	/* Merge previous block if it's free */
	if (test_flag(block, PREV_FREE)) {
		tmpblock = (struct block_header *)((char *)(page_start) +
						get_blockprev(block));
		offset = offset - tmpblock->size - XV_ALIGN;

		if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
			remove_block(pool, page, offset, tmpblock,
				    get_index_for_insert(tmpblock->size));

		tmpblock->size += block->size + XV_ALIGN;
		block = tmpblock;
	}

	/* No used objects in this page. Free it. */
	if (block->size == PAGE_SIZE - XV_ALIGN) {
		put_ptr_atomic(page_start, KM_USER0);
		spin_unlock(&pool->lock);

		__free_page(page);
		stat_dec(&pool->total_pages);
		return;
	}

	set_flag(block, BLOCK_FREE);
	if (block->size >= XV_MIN_ALLOC_SIZE)
		insert_block(pool, page, offset, block);

	if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
		tmpblock = BLOCK_NEXT(block);
		set_flag(tmpblock, PREV_FREE);
		set_blockprev(tmpblock, offset);
	}

	put_ptr_atomic(page_start, KM_USER0);
	spin_unlock(&pool->lock);
}
Example #23
File: extensions.c Project: xSIMx/Mesa-3D
/**
 * Construct the GL_EXTENSIONS string.  Called the first time that
 * glGetString(GL_EXTENSIONS) is called.
 */
GLubyte*
_mesa_make_extension_string(struct gl_context *ctx)
{
   /* The extension string. */
   char *exts = 0;
   /* Length of extension string. */
   size_t length = 0;
   /* Number of extensions */
   unsigned count;
   /* Indices of the extensions sorted by year */
   extension_index *extension_indices;
   /* String of extra extensions. */
   char *extra_extensions = get_extension_override(ctx);
   GLboolean *base = (GLboolean *) &ctx->Extensions;
   const struct extension *i;
   unsigned j;
   unsigned maxYear = ~0;
   unsigned api_set = (1 << ctx->API);
   if (_mesa_is_gles3(ctx))
      api_set |= ES3;

   /* Check if the MESA_EXTENSION_MAX_YEAR env var is set */
   {
      const char *env = getenv("MESA_EXTENSION_MAX_YEAR");
      if (env) {
         maxYear = atoi(env);
         _mesa_debug(ctx, "Note: limiting GL extensions to %u or earlier\n",
                     maxYear);
      }
   }

   /* Compute length of the extension string. */
   count = 0;
   for (i = extension_table; i->name != 0; ++i) {
      if (base[i->offset] &&
          i->year <= maxYear &&
          (i->api_set & api_set)) {
	 length += strlen(i->name) + 1; /* +1 for space */
	 ++count;
      }
   }
   if (extra_extensions != NULL)
      length += 1 + strlen(extra_extensions); /* +1 for space */

   exts = calloc(ALIGN(length + 1, 4), sizeof(char));
   if (exts == NULL) {
      free(extra_extensions);
      return NULL;
   }

   extension_indices = malloc(count * sizeof(extension_index));
   if (extension_indices == NULL) {
      free(exts);
      free(extra_extensions);
      return NULL;
   }

   /* Sort extensions in chronological order because certain old applications
    * (e.g., the Quake3 demo) store the extension list in a fixed-size buffer;
    * chronological ordering ensures that the extensions such applications
    * expect will fit into that buffer.
    */
   j = 0;
   for (i = extension_table; i->name != 0; ++i) {
      if (base[i->offset] &&
          i->year <= maxYear &&
          (i->api_set & api_set)) {
         extension_indices[j++] = i - extension_table;
      }
   }
   assert(j == count);
   qsort(extension_indices, count, sizeof *extension_indices, extension_compare);

   /* Build the extension string. */
   for (j = 0; j < count; ++j) {
      i = &extension_table[extension_indices[j]];
      assert(base[i->offset] && (i->api_set & api_set));
      strcat(exts, i->name);
      strcat(exts, " ");
   }
   free(extension_indices);
   if (extra_extensions != 0) {
      strcat(exts, extra_extensions);
      free(extra_extensions);
   }

   return (GLubyte *) exts;
}
Example #24
void board_init_f(ulong bootflag)
{
	bd_t *bd;
	init_fnc_t **init_fnc_ptr;
	gd_t *id;
	ulong addr, addr_sp;
#ifdef CONFIG_PRAM
	ulong reg;
#endif
	void *new_fdt = NULL;
	size_t fdt_size = 0;

	bootstage_mark_name(BOOTSTAGE_ID_START_UBOOT_F, "board_init_f");

	/* Pointer is writable since we allocated a register for it */
	gd = (gd_t *) ((CONFIG_SYS_INIT_SP_ADDR) & ~0x07);
	/* compiler optimization barrier needed for GCC >= 3.4 */
	__asm__ __volatile__("": : :"memory");

	memset((void *)gd, 0, sizeof(gd_t));

	gd->mon_len = _bss_end_ofs;
#ifdef CONFIG_OF_EMBED
	/* Get a pointer to the FDT */
	gd->fdt_blob = _binary_dt_dtb_start;
#elif defined CONFIG_OF_SEPARATE
	/* FDT is at end of image */
	gd->fdt_blob = (void *)(_end_ofs + _TEXT_BASE);
#endif
	/* Allow the early environment to override the fdt address */
	gd->fdt_blob = (void *)getenv_ulong("fdtcontroladdr", 16,
						(uintptr_t)gd->fdt_blob);

	for (init_fnc_ptr = init_sequence; *init_fnc_ptr; ++init_fnc_ptr) {
		if ((*init_fnc_ptr)() != 0) {
			hang ();
		}
	}

#ifdef CONFIG_OF_CONTROL
	/* For now, put this check after the console is ready */
	if (fdtdec_prepare_fdt()) {
		panic("** CONFIG_OF_CONTROL defined but no FDT - please see "
			"doc/README.fdt-control");
	}
#endif

	debug("monitor len: %08lX\n", gd->mon_len);
	/*
	 * Ram is setup, size stored in gd !!
	 */
	debug("ramsize: %08lX\n", gd->ram_size);
#if defined(CONFIG_SYS_MEM_TOP_HIDE)
	/*
	 * Subtract specified amount of memory to hide so that it won't
	 * get "touched" at all by U-Boot. By fixing up gd->ram_size
	 * the Linux kernel should now get passed the now "corrected"
	 * memory size and won't touch it either. This should work
	 * for arch/ppc and arch/powerpc. Only Linux board ports in
	 * arch/powerpc with bootwrapper support, that recalculate the
	 * memory size from the SDRAM controller setup will have to
	 * get fixed.
	 */
	gd->ram_size -= CONFIG_SYS_MEM_TOP_HIDE;
#endif

	addr = CONFIG_SYS_SDRAM_BASE + gd->ram_size;

#ifdef CONFIG_LOGBUFFER
#ifndef CONFIG_ALT_LB_ADDR
	/* reserve kernel log buffer */
	addr -= (LOGBUFF_RESERVE);
	debug("Reserving %dk for kernel logbuffer at %08lx\n", LOGBUFF_LEN,
		addr);
#endif
#endif

#ifdef CONFIG_PRAM
	/*
	 * reserve protected RAM
	 */
	reg = getenv_ulong("pram", 10, CONFIG_PRAM);
	addr -= (reg << 10);		/* size is in kB */
	debug("Reserving %ldk for protected RAM at %08lx\n", reg, addr);
#endif /* CONFIG_PRAM */

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	/* reserve TLB table */
	addr -= (4096 * 4);

	/* round down to next 64 kB limit */
	addr &= ~(0x10000 - 1);

	gd->tlb_addr = addr;
	debug("TLB table at: %08lx\n", addr);
#endif

	/* round down to next 4 kB limit */
	addr &= ~(4096 - 1);
	debug("Top of RAM usable for U-Boot at: %08lx\n", addr);

#ifdef CONFIG_LCD
#ifdef CONFIG_FB_ADDR
	gd->fb_base = CONFIG_FB_ADDR;
#else
	/* reserve memory for LCD display (always full pages) */
	addr = lcd_setmem(addr);
	gd->fb_base = addr;
#endif /* CONFIG_FB_ADDR */
#endif /* CONFIG_LCD */

#ifndef CONFIG_SYS_SKIP_ARM_RELOCATION
	/*
	 * reserve memory for U-Boot code, data & bss
	 * round down to next 4 kB limit
	 */
	addr -= gd->mon_len;
	addr &= ~(4096 - 1);

	debug("Reserving %ldk for U-Boot at: %08lx\n", gd->mon_len >> 10, addr);
#endif

#ifndef CONFIG_SPL_BUILD
	/*
	 * reserve memory for malloc() arena
	 */
	addr_sp = addr - TOTAL_MALLOC_LEN;
	debug("Reserving %dk for malloc() at: %08lx\n",
			TOTAL_MALLOC_LEN >> 10, addr_sp);
	/*
	 * (permanently) allocate a Board Info struct
	 * and a permanent copy of the "global" data
	 */
	addr_sp -= sizeof (bd_t);
	bd = (bd_t *) addr_sp;
	gd->bd = bd;
	debug("Reserving %zu Bytes for Board Info at: %08lx\n",
			sizeof (bd_t), addr_sp);

#ifdef CONFIG_MACH_TYPE
	gd->bd->bi_arch_number = CONFIG_MACH_TYPE; /* board id for Linux */
#endif

	addr_sp -= sizeof (gd_t);
	id = (gd_t *) addr_sp;
	debug("Reserving %zu Bytes for Global Data at: %08lx\n",
			sizeof (gd_t), addr_sp);

#if defined(CONFIG_OF_SEPARATE) && defined(CONFIG_OF_CONTROL)
	/*
	 * If the device tree is sitting immediately above our image then we
	 * must relocate it. If it is embedded in the data section, then it
	 * will be relocated with other data.
	 */
	if (gd->fdt_blob) {
		fdt_size = ALIGN(fdt_totalsize(gd->fdt_blob) + 0x1000, 32);

		addr_sp -= fdt_size;
		new_fdt = (void *)addr_sp;
		debug("Reserving %zu Bytes for FDT at: %08lx\n",
		      fdt_size, addr_sp);
	}
#endif

	/* set up the stack pointer for exceptions */
	gd->irq_sp = addr_sp;
#ifdef CONFIG_USE_IRQ
	addr_sp -= (CONFIG_STACKSIZE_IRQ+CONFIG_STACKSIZE_FIQ);
	debug("Reserving %zu Bytes for IRQ stack at: %08lx\n",
		CONFIG_STACKSIZE_IRQ+CONFIG_STACKSIZE_FIQ, addr_sp);
#endif
	/* leave 3 words for abort-stack    */
	addr_sp -= 12;

	/* 8-byte alignment for ABI compliance */
	addr_sp &= ~0x07;
#else
	addr_sp += 128;	/* leave 32 words for abort-stack   */
	gd->irq_sp = addr_sp;
#endif

	debug("New Stack Pointer is: %08lx\n", addr_sp);

#ifdef CONFIG_POST
	post_bootmode_init();
	post_run(NULL, POST_ROM | post_bootmode_get(0));
#endif

	gd->bd->bi_baudrate = gd->baudrate;
	/* RAM is board specific, so move it to board code ... */
	dram_init_banksize();
	display_dram_config();	/* and display it */

#ifdef CONFIG_SYS_SKIP_ARM_RELOCATION
	gd->malloc_end = addr;
	addr = _TEXT_BASE;
#endif
	gd->relocaddr = addr;
	gd->start_addr_sp = addr_sp;
	gd->reloc_off = addr - _TEXT_BASE;
	debug("relocation Offset is: %08lx\n", gd->reloc_off);
	if (new_fdt) {
		memcpy(new_fdt, gd->fdt_blob, fdt_size);
		gd->fdt_blob = new_fdt;
	}
	memcpy(id, (void *)gd, sizeof(gd_t));

	relocate_code(addr_sp, id, addr);

	/* NOTREACHED - relocate_code() does not return */
}
Example #25
File: io.c Project: 119-org/hi3518-osdrv
/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take whole max. write unit (@c->max_write_size). Instead, the node
 * will sit in RAM until the write-buffer is synchronized (e.g., by timer, or
 * because more data are appended to the write-buffer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node cannot be written because there is no more
 * space in this logical eraseblock, %-ENOSPC is returned.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
					    wbuf->offs, wbuf->size,
					    wbuf->dtype);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	written = 0;

	if (wbuf->used) {
		/*
		 * The node is large enough and does not fit entirely within
		 * current available space. We have to fill and flush
		 * write-buffer and switch to the next max. write unit.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
				    wbuf->size, wbuf->dtype);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size and @wbuf->size is less than
		 * @c->max_write_size. Write @wbuf->size bytes to make sure the
		 * following writes are done in optimal @c->max_write_size
		 * chunks.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubi_leb_write(c->ubi, wbuf->lnum, buf, wbuf->offs,
				    wbuf->size, wbuf->dtype);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may take up further whole max. write units, so
	 * write that multiple of the max. write unit size directly to the
	 * flash media.  We align the node length to an 8-byte boundary because
	 * we flush the wbuf to flash anyway if the remaining space is less
	 * than 8 bytes.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written,
				    wbuf->offs, n, wbuf->dtype);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * And now we have what's left and what does not take whole
		 * max. write unit, so write it to the write-buffer and we are
		 * done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(wbuf);

	return 0;

out:
	ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	dbg_dump_leb(c, wbuf->lnum);
	return err;
}
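
The splitting logic above can be followed with concrete (invented) numbers: the node length is first rounded to 8 bytes, the current write-buffer is filled and flushed, whole max. write units are written directly, and the tail goes back into the buffer:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned len = 5001;		/* hypothetical node length */
	unsigned max_write = 2048;	/* hypothetical c->max_write_size */
	unsigned avail = 512;		/* hypothetical space left in the wbuf */

	unsigned aligned_len = ALIGN(len, 8);			/* 5008 */
	unsigned after_flush = aligned_len - avail;		/* 4496 left after filling the wbuf */
	unsigned direct = (after_flush / max_write) * max_write;/* 4096 written straight to flash */
	unsigned tail = after_flush - direct;			/* 400 buffered for later */

	printf("aligned %u, direct %u, tail %u\n", aligned_len, direct, tail);
	return 0;
}
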
Example #26
File: reserve_mem.c Project: anewkirk/AJK
void __init s5p_cma_region_reserve(struct cma_region *regions_normal,
				      struct cma_region *regions_secure,
				      size_t align_secure, const char *map)
{
	struct cma_region *reg;
	phys_addr_t paddr_last = 0xFFFFFFFF;

	for (reg = regions_normal; reg->size != 0; reg++) {
		phys_addr_t paddr;

		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned\n",
								reg->name);
			reg->size = PAGE_ALIGN(reg->size);
		}


		if (reg->reserved) {
			pr_err("S5P/CMA: '%s' already reserved\n", reg->name);
			continue;
		}

		if (reg->alignment) {
			if ((reg->alignment & ~PAGE_MASK) ||
				(reg->alignment & (reg->alignment - 1))) {
				pr_err("S5P/CMA: Failed to reserve '%s': "
						"incorrect alignment 0x%08x.\n",
						reg->name, reg->alignment);
				continue;
			}
		} else {
			reg->alignment = PAGE_SIZE;
		}

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && (memblock_reserve(reg->start, reg->size) == 0))
				reg->reserved = 1;
			else {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
				       reg->name);
				continue;
			}

			pr_debug("S5P/CMA: "
				 "Reserved 0x%08x/0x%08x for '%s'\n",
				 reg->start, reg->size, reg->name);
			paddr = reg->start;
		} else {
			paddr = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					reg->size, reg->alignment);
		}

		if (paddr) {
			if (memblock_reserve(paddr, reg->size)) {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
								reg->name);
				continue;
			}

			reg->start = paddr;
			reg->reserved = 1;

			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'\n",
						reg->start, reg->size, reg->name);
		} else {
			pr_err("S5P/CMA: No free space in memory for '%s'\n",
								reg->name);
		}

		if (cma_early_region_register(reg)) {
			pr_err("S5P/CMA: Failed to register '%s'\n",
								reg->name);
			memblock_free(reg->start, reg->size);
		} else {
			paddr_last = min(paddr, paddr_last);
		}
	}

	if (align_secure & (align_secure - 1)) {
		pr_err("S5P/CMA: "
			"Wrong alignment requirement for secure region.\n");
	} else if (regions_secure && regions_secure->size) {
		size_t size_secure = 0;

		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		/* Entire secure regions will be merged into 2
		 * consecutive regions. */
		if (align_secure == 0) {
			size_t size_region2;
			size_t order_region2;
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif

			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#ifdef CONFIG_USE_MFC_CMA
#if defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
					} else
#elif defined(CONFIG_MACH_GC1)
					if (reg->start == 0x50400000) {
						if (memblock_reserve(0x50400000,
								0x400000))
							panic("memblock\n");
						if (memblock_reserve(0x53000000,
								0x500000))
							panic("memblock\n");
					} else
#endif
					{
						if (memblock_reserve(reg->start,
								reg->size))
							panic("memblock\n");
					}
#else
					if (memblock_reserve(reg->start,
								reg->size))
						panic("memblock\n");
#endif
				} else {
					reg->start = paddr_last;
					reg->reserved = 1;
					paddr_last += reg->size;
				}
#endif
				pr_info("S5P/CMA: "
					"Reserved 0x%08x/0x%08x for '%s'\n",
					reg->start, reg->size, reg->name);
				if (cma_early_region_register(reg)) {
					memblock_free(reg->start, reg->size);
					pr_err("S5P/CMA: "
					"Failed to register secure region "
					"'%s'\n", reg->name);
				} else {
					size_secure -= reg->size;
				}
			} while (reg-- != regions_secure);

			if (size_secure > 0)
				memblock_free(paddr_last, size_secure);
		} else {
			pr_err("S5P/CMA: Failed to reserve secure regions\n");
		}
	}

	if (map)
		cma_set_defaults(NULL, map);
}
Example #27
sample_t* Buffer::getBuffer() const
{
	return (sample_t*) ALIGN(m_buffer);
}
Example #28
File: log.c Project: Scorpio92/mstar6a918
/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing "commit start" node to the log and
 * reference nodes for all journal heads which will define new journal after
 * the commit has been finished. The commit start and reference nodes are
 * written in one go to the nearest empty log LEB (hence, when commit is
 * finished UBIFS may safely unmap all the previous log LEBs). This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */

	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	#if defined(CONFIG_UBIFS_FS_FULL_USE_LOG) && (MP_NAND_UBIFS == 1)
	/* Do not switch to the next log LEB; keep programming the next available page in the same log LEB. */
	/* If the next available page is at the end of the LEB, switch to the next LEB. */

	if (c->lhead_offs >= (c->leb_size - (c->min_io_size * 4))) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}
	
	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:%d, len %d", c->lhead_lnum, c->lhead_offs, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, c->lhead_offs, len, UBI_SHORTTERM);
	if (err)
		goto out;
	#else
	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM);
	if (err)
		goto out;
	#endif

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}
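The sizing arithmetic above is worth spelling out: a commit-start node plus one reference node per journal head is assembled in a single buffer, and both the kmalloc size and the final write length are rounded up to the flash's minimum I/O unit with ALIGN, with ubifs_pad() filling the gap. A stand-alone sketch of that round-up, using assumed illustrative node sizes (the real values come from the UBIFS headers):

#include <stdio.h>

/* ALIGN as used above: round x up to the next multiple of a (a power of 2). */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Illustrative sizes only; the real ones come from ubifs-media.h. */
	int cs_node_sz  = 32;	/* assumed "commit start" node size */
	int ref_node_sz = 64;	/* assumed reference node size */
	int jhead_cnt   = 3;	/* e.g. GC, base and data journal heads */
	int min_io_size = 2048;	/* NAND page size of a hypothetical device */

	int len    = cs_node_sz + jhead_cnt * ref_node_sz;	/* 32 + 192 = 224 */
	int padded = ALIGN(len, min_io_size);			/* 2048 */

	printf("payload %d bytes, padded to %d, %d bytes of padding\n",
	       len, padded, padded - len);
	return 0;
}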
Example #29
0
static int imx_bbu_nand_update(struct bbu_handler *handler, struct bbu_data *data)
{
	struct imx_nand_fcb_bbu_handler *imx_handler =
		container_of(handler, struct imx_nand_fcb_bbu_handler, handler);
	struct cdev *bcb_cdev;
	struct mtd_info *mtd;
	int ret, i;
	struct fcb_block *fcb = NULL;
	void *fw = NULL, *fw_orig = NULL;
	unsigned fw_size, partition_size;
	enum filetype filetype;
	unsigned num_blocks_fw;
	int pages_per_block;
	int used = 0;
	int fw_orig_len;
	int used_refresh = 0, unused_refresh = 0;

	if (data->image) {
		filetype = file_detect_type(data->image, data->len);

		if (filetype != imx_handler->filetype &&
			!bbu_force(data, "Image is not of type %s but of type %s",
				file_type_to_string(imx_handler->filetype),
				file_type_to_string(filetype)))
			return -EINVAL;
	}

	bcb_cdev = cdev_by_name(handler->devicefile);
	if (!bcb_cdev) {
		pr_err("%s: No FCB device!\n", __func__);
		return -ENODEV;
	}

	mtd = bcb_cdev->mtd;
	partition_size = mtd->size;
	pages_per_block = mtd->erasesize / mtd->writesize;

	for (i = 0; i < 4; i++) {
		read_fcb(mtd, i, &fcb);
		if (fcb)
			break;
	}

	/*
	 * This code uses the following layout in the Nand flash:
	 *
	 * fwmaxsize = (n_blocks - 4) / 2
	 *
	 * block
	 *
	 * 0              ----------------------
	 *                | FCB/DBBT 0         |
	 * 1              ----------------------
	 *                | FCB/DBBT 1         |
	 * 2              ----------------------
	 *                | FCB/DBBT 2         |
	 * 3              ----------------------
	 *                | FCB/DBBT 3         |
	 * 4              ----------------------
	 *                | Firmware slot 0    |
	 * 4 + fwmaxsize  ----------------------
	 *                | Firmware slot 1    |
	 *                ----------------------
	 *
	 * We want a robust update in which a power failure may occur at any
	 * time without bricking the board, so here's the strategy:
	 *
	 * The FCBs contain pointers to the firmware slots in the
	 * Firmware1_startingPage and Firmware2_startingPage fields. Note that
	 * Firmware1_startingPage doesn't necessarily point to slot 0. We
	 * exchange the pointers during update to atomically switch between the
	 * old and the new firmware.
	 *
	 * - We read the first valid FCB and the firmware slots.
	 * - We check which firmware slot is currently used by the ROM:
	 *   - if no FCB is found or its layout differs from the above layout,
	 *     continue without robust update
	 *   - if only one firmware slot is readable, the ROM uses it
	 *   - if both slots are readable, the ROM will use slot 0
	 * - Step 1: erase/update the slot currently unused by the ROM
	 * - Step 2: Update FCBs/DBBTs, thereby letting Firmware1_startingPage
	 *           point to the slot we just updated. From this moment
	 *           on the new firmware will be used and running a
	 *           refresh/repair after a power failure after this
	 *           step will complete the update.
	 * - Step 3: erase/update the other firmware slot
	 * - Step 4: Eventually write FCBs/DBBTs again. This may become
	 *           necessary when step 3 revealed new bad blocks.
	 *
	 * This robust update only works when the original FCBs on the device
	 * use the same layout as this code does.  In other cases the update
	 * will still work, but it won't be robust against power failures.
	 *
	 * Refreshing the firmware, which is needed when blocks become
	 * unreadable due to read disturbance, works the same way, except that
	 * the new firmware is identical to the old one and is only written
	 * when reading from the device returns -EUCLEAN, indicating that a
	 * block needs to be rewritten.
	 */
	if (fcb)
		read_firmware_all(mtd, fcb, &fw_orig, &fw_orig_len,
				  &used_refresh, &unused_refresh, &used);

	if (data->image) {
		/*
		 * We have to write one additional page to make the ROM happy.
		 * Perhaps the PagesInFirmwarex fields really hold the number
		 * of pages - 1; kobs-ng does the same.
		 */
		fw_size = ALIGN(data->len + mtd->writesize, mtd->writesize);
		fw = xzalloc(fw_size);
		memcpy(fw, data->image, data->len);
		free(fw_orig);
		used_refresh = 1;
		unused_refresh = 1;

		free(fcb);
		fcb = xzalloc(sizeof(*fcb));
		fcb->Firmware1_startingPage = imx_bbu_firmware_start_block(mtd, !used) * pages_per_block;
		fcb->Firmware2_startingPage = imx_bbu_firmware_start_block(mtd, used) * pages_per_block;
		fcb->PagesInFirmware1 = fw_size / mtd->writesize;
		fcb->PagesInFirmware2 = fcb->PagesInFirmware1;

		fcb_create(imx_handler, fcb, mtd);
	} else {
		if (!fcb) {
			pr_err("No FCB found on device, cannot refresh\n");
			ret = -EINVAL;
			goto out;
		}

		if (!fw_orig) {
			pr_err("No firmware found on device, cannot refresh\n");
			ret = -EINVAL;
			goto out;
		}

		fw = fw_orig;
		fw_size = fw_orig_len;
		pr_info("Refreshing existing firmware\n");
	}

	num_blocks_fw = imx_bbu_firmware_max_blocks(mtd);

	if (num_blocks_fw * mtd->erasesize < fw_size) {
		pr_err("Not enough space for update\n");
		ret = -ENOSPC;
		goto out;	/* free fw and fcb instead of leaking them */
	}

	ret = bbu_confirm(data);
	if (ret)
		goto out;

	/* Step 1: write firmware which is currently unused by the ROM */
	if (unused_refresh) {
		pr_info("%sing slot %d\n", data->image ? "updat" : "refresh", !used);
		ret = imx_bbu_write_firmware(mtd, !used, fw, fw_size);
		if (ret < 0)
			goto out;
	} else {
		pr_info("firmware slot %d still ok, nothing to do\n", !used);
	}

	/*
	 * Step 2: Write FCBs/DBBTs. This will use the firmware we have
	 * just written as primary firmware. From now on the new
	 * firmware will be booted.
	 */
	ret = imx_bbu_write_fcbs_dbbts(mtd, fcb);
	if (ret < 0)
		goto out;

	/* Step 3: Write the secondary firmware */
	if (used_refresh) {
		pr_info("%sing slot %d\n", data->image ? "updat" : "refresh", used);
		ret = imx_bbu_write_firmware(mtd, used, fw, fw_size);
		if (ret < 0)
			goto out;
	} else {
		pr_info("firmware slot %d still ok, nothing to do\n", used);
	}

	/*
	 * Step 4: If writing the secondary firmware discovered new bad
	 * blocks, write the FCBs/DBBTs again with updated bad block
	 * information.
	 */
	if (ret > 0) {
		pr_info("New bad blocks detected, writing FCBs/DBBTs again\n");
		ret = imx_bbu_write_fcbs_dbbts(mtd, fcb);
		if (ret < 0)
			goto out;
	}

out:
	free(fw);
	free(fcb);

	return ret;
}
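The slot arithmetic implied by the layout comment in the function above is easy to show in isolation: four blocks hold FCB/DBBT copies, the remaining blocks are split into two firmware slots, and the FCB records each slot's starting page rather than its starting block. A minimal sketch of that calculation with hypothetical NAND geometry; the real code derives these numbers via imx_bbu_firmware_start_block() and mtd->erasesize / mtd->writesize:

#include <stdio.h>

int main(void)
{
	/* Hypothetical NAND geometry, for illustration only. */
	unsigned n_blocks        = 64;			/* blocks in the boot area */
	unsigned erasesize       = 128 * 1024;		/* block size in bytes */
	unsigned writesize       = 2048;		/* page size in bytes */
	unsigned pages_per_block = erasesize / writesize;	/* 64 */

	/* Layout from the comment: blocks 0-3 hold the FCB/DBBT copies. */
	unsigned fwmaxsize   = (n_blocks - 4) / 2;	/* 30 blocks per slot */
	unsigned slot0_block = 4;
	unsigned slot1_block = 4 + fwmaxsize;		/* 34 */

	/* The FCB fields are page numbers, not block numbers. */
	printf("Firmware1_startingPage (slot 0): %u\n", slot0_block * pages_per_block);
	printf("Firmware2_startingPage (slot 1): %u\n", slot1_block * pages_per_block);
	printf("max firmware size per slot: %u bytes\n", fwmaxsize * erasesize);
	return 0;
}

During an update, Firmware1_startingPage is pointed at whichever slot was just rewritten, so which physical slot each FCB field refers to is swapped over time rather than fixed.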
Example #30
0
/*
 * Reads the security data from the metadata resource of a WIM image.
 *
 * @buf
 *	Buffer containing an uncompressed WIM metadata resource.
 * @buf_len
 *	Length of the uncompressed metadata resource, in bytes.
 * @sd_ret
 *	On success, a pointer to the resulting security data structure will be
 *	returned here.
 *
 * Note: There is no `offset' argument because the security data is located at
 * the beginning of the metadata resource.
 *
 * Return values:
 *	WIMLIB_ERR_SUCCESS (0)
 *	WIMLIB_ERR_INVALID_METADATA_RESOURCE
 *	WIMLIB_ERR_NOMEM
 */
int
read_wim_security_data(const u8 *buf, size_t buf_len,
		       struct wim_security_data **sd_ret)
{
	struct wim_security_data *sd;
	int ret;
	u64 total_len;
	u64 sizes_size;
	u64 size_no_descriptors;
	const struct wim_security_data_disk *sd_disk;
	const u8 *p;

	if (buf_len < 8)
		return WIMLIB_ERR_INVALID_METADATA_RESOURCE;

	sd = new_wim_security_data();
	if (!sd)
		goto out_of_memory;

	sd_disk = (const struct wim_security_data_disk *)buf;
	sd->total_length = ALIGN(le32_to_cpu(sd_disk->total_length), 8);
	sd->num_entries = le32_to_cpu(sd_disk->num_entries);

	/* Length field of 0 is a special case that really means length
	 * of 8. */
	if (sd->total_length == 0)
		sd->total_length = 8;

	/* The security_id field of each dentry is a signed 32-bit integer, so
	 * the possible indices into the security descriptors table are 0
	 * through 0x7fffffff.  Which means 0x80000000 security descriptors
	 * maximum.  Not that you should ever have anywhere close to that many
	 * security descriptors! */
	if (sd->num_entries > 0x80000000)
		goto out_invalid_sd;

	/* Verify the listed total length of the security data is big enough to
	 * include the sizes array, verify that the file data is big enough to
	 * include it as well, then allocate the array of sizes.
	 *
	 * Note: The total length of the security data must fit in a 32-bit
	 * integer, even though each security descriptor size is a 64-bit
	 * integer.  This is stupid, and we need to be careful not to actually
	 * let the security descriptor sizes be over 0xffffffff.  */
	if (sd->total_length > buf_len)
		goto out_invalid_sd;

	sizes_size = (u64)sd->num_entries * sizeof(u64);
	size_no_descriptors = 8 + sizes_size;
	if (size_no_descriptors > sd->total_length)
		goto out_invalid_sd;

	total_len = size_no_descriptors;

	/* Return immediately if no security descriptors. */
	if (sd->num_entries == 0)
		goto out_descriptors_ready;

	/* Allocate a new buffer for the sizes array */
	sd->sizes = MALLOC(sizes_size);
	if (!sd->sizes)
		goto out_of_memory;

	/* Copy the sizes array into the new buffer */
	for (u32 i = 0; i < sd->num_entries; i++) {
		sd->sizes[i] = le64_to_cpu(sd_disk->sizes[i]);
		if (sd->sizes[i] > 0xffffffff)
			goto out_invalid_sd;
	}

	p = (const u8*)sd_disk + size_no_descriptors;

	/* Allocate the array of pointers to the security descriptors, then read
	 * them into separate buffers. */
	sd->descriptors = CALLOC(sd->num_entries, sizeof(sd->descriptors[0]));
	if (!sd->descriptors)
		goto out_of_memory;

	for (u32 i = 0; i < sd->num_entries; i++) {
		if (sd->sizes[i] == 0)
			continue;
		total_len += sd->sizes[i];
		if (total_len > (u64)sd->total_length)
			goto out_invalid_sd;
		sd->descriptors[i] = memdup(p, sd->sizes[i]);
		if (!sd->descriptors[i])
			goto out_of_memory;
		p += sd->sizes[i];
	}
out_descriptors_ready:
	if (ALIGN(total_len, 8) != sd->total_length) {
		WARNING("Stored WIM security data total length was "
			"%"PRIu32" bytes, but calculated %"PRIu32" bytes",
			sd->total_length, (u32)total_len);
	}
	*sd_ret = sd;
	ret = 0;
	goto out;
out_invalid_sd:
	ERROR("WIM security data is invalid!");
	ret = WIMLIB_ERR_INVALID_METADATA_RESOURCE;
	goto out_free_sd;
out_of_memory:
	ERROR("Out of memory while reading WIM security data!");
	ret = WIMLIB_ERR_NOMEM;
out_free_sd:
	free_wim_security_data(sd);
out:
	return ret;
}
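For context, a caller would typically hand this function the start of an already-decompressed metadata resource and, when done with the parsed structure, release it with free_wim_security_data() (the same helper used in the error path above). A minimal sketch of such a caller, assuming buf/buf_len were obtained elsewhere and that the wimlib-internal types are in scope:

/* Sketch of a caller, assuming `buf`/`buf_len` already hold an uncompressed
 * WIM metadata resource obtained elsewhere. */
static int parse_metadata_security(const u8 *buf, size_t buf_len)
{
	struct wim_security_data *sd;
	int ret;

	ret = read_wim_security_data(buf, buf_len, &sd);
	if (ret)		/* WIMLIB_ERR_* code on failure */
		return ret;

	/* ... use sd->num_entries, sd->sizes[i] and sd->descriptors[i] ... */

	free_wim_security_data(sd);
	return 0;
}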