Example #1
wwd_result_t  wwd_bus_write_wifi_firmware_image( void )
{
    uint32_t offset = 0;
    uint32_t total_size;
    uint32_t reset_inst = 0;
    uint32_t size_read;

    total_size = (uint32_t) resource_get_size( &wifi_firmware_image );

    /* The first 4 bytes of the image hold the reset instruction; keep a copy
     * so it can be written into FLOPS once the full image has been downloaded */
    resource_read ( &wifi_firmware_image, 0, 4, &size_read, &reset_inst );

    /* Copy the firmware image into WLAN memory in WLAN_MEMORY_SIZE-sized pieces,
     * advancing by the number of bytes actually read each iteration */
    while ( total_size > offset )
    {
        resource_read ( &wifi_firmware_image, offset, WLAN_MEMORY_SIZE, &size_read, (uint8_t *)(WLAN_ADDR+offset) );
        offset += size_read;
    }

    /*
     * copy reset vector to FLOPS
     * WLAN Address = {Programmable Register[31:18],
     * Current Transaction's HADDR[17:0]}
     */
    write_reset_instruction( reset_inst );

    return WWD_SUCCESS;
}
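
Examples #1-#3 go through the same reader call (the resource_read() in Example #4 below is an unrelated, driver-local helper that happens to share the name). Judging from the call sites, the prototype is presumably along the following lines; it is reconstructed from the arguments used above rather than quoted from a header, so treat the exact type names and return type as assumptions.

/* Assumed prototype, inferred from the call sites on this page:
 * read up to 'maxsize' bytes of 'resource', starting at 'offset', into 'buffer',
 * and report the number of bytes actually read through 'size' */
wwd_result_t resource_read( const resource_hnd_t* resource,
                            uint32_t offset,
                            uint32_t maxsize,
                            uint32_t* size,
                            void* buffer );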
Example #2
/******************************************************
 *               Function Definitions
 ******************************************************/

wwd_result_t host_platform_resource_size( wwd_resource_t resource, uint32_t* size_out )
{
    if ( resource == WWD_RESOURCE_WLAN_FIRMWARE )
    {
        *size_out = (uint32_t) resource_get_size( &wifi_firmware_image );
    }
    else
    {
        *size_out = NVRAM_SIZE;
    }
    return WWD_SUCCESS;
}

#if defined( WWD_DIRECT_RESOURCES )
wwd_result_t host_platform_resource_read_direct( wwd_resource_t resource, const void** ptr_out )
{
    if ( resource == WWD_RESOURCE_WLAN_FIRMWARE )
    {
        *ptr_out = wifi_firmware_image.val.mem.data;
    }
    else
    {
        *ptr_out = NVRAM_IMAGE_VARIABLE;
    }
    return WWD_SUCCESS;
}
#else /* ! defined( WWD_DIRECT_RESOURCES ) */
wwd_result_t host_platform_resource_read_indirect( wwd_resource_t resource, uint32_t offset, void* buffer, uint32_t buffer_size, uint32_t* size_out )
{
    if ( resource == WWD_RESOURCE_WLAN_FIRMWARE )
    {
        return resource_read( &wifi_firmware_image, offset, buffer_size, size_out, buffer );
    }
    else
    {
        *size_out = MIN( buffer_size, NVRAM_SIZE - offset );
        memcpy( buffer, &NVRAM_IMAGE_VARIABLE[ offset ], *size_out );
        return WWD_SUCCESS;
    }
}
#endif /* WWD_DIRECT_RESOURCES */
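
For the non-direct build, a caller typically streams a resource through host_platform_resource_read_indirect() in fixed-size pieces until the size reported by host_platform_resource_size() has been consumed. A minimal sketch of that pattern, assuming only the two functions above; the 512-byte buffer and the stream_resource() name are illustrative, not part of the port layer.

/* Sketch: stream a resource through the indirect read path in fixed-size pieces.
 * Buffer size and function name are illustrative only. */
static wwd_result_t stream_resource( wwd_resource_t resource )
{
    uint8_t      buffer[ 512 ];
    uint32_t     total_size = 0;
    uint32_t     offset     = 0;
    uint32_t     size_read  = 0;
    wwd_result_t result;

    result = host_platform_resource_size( resource, &total_size );
    if ( result != WWD_SUCCESS )
    {
        return result;
    }

    while ( offset < total_size )
    {
        result = host_platform_resource_read_indirect( resource, offset, buffer, sizeof( buffer ), &size_read );
        if ( result != WWD_SUCCESS )
        {
            return result;
        }
        /* ... hand 'buffer' / 'size_read' to the consumer (e.g. the bus layer) here ... */
        offset += size_read;
    }
    return WWD_SUCCESS;
}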
Example #3
wwd_result_t wwd_process_clm_data(void)
{
    wwd_result_t ret = WWD_SUCCESS;
    uint32_t clm_blob_size;
    uint32_t datalen;
    unsigned int size2alloc, data_offset;
    unsigned char *chunk_buf;
    uint16_t dl_flag = DL_BEGIN;
    unsigned int cumulative_len = 0;
    unsigned int chunk_len;
    uint32_t size_read;

    /* The CLM blob size is the initial datalen value, which is decremented as chunks are sent */
    clm_blob_size = resource_get_size( &wifi_firmware_clm_blob );
    datalen = clm_blob_size;

    data_offset = offsetof(wl_dload_data_t, data);
    size2alloc = data_offset + MAX_CHUNK_LEN;

    if ((chunk_buf = (unsigned char *)malloc(size2alloc)) != NULL) {
        memset(chunk_buf, 0, size2alloc);

        do {
            if (datalen >= MAX_CHUNK_LEN)
                chunk_len = MAX_CHUNK_LEN;
            else
                chunk_len = datalen;

            /* Check both the resource_read() return code and that size_read matches the requested chunk length */
            if ((ret = resource_read(&wifi_firmware_clm_blob, cumulative_len, chunk_len, &size_read, chunk_buf + data_offset)) != WWD_SUCCESS) {
                break;
            }

            if (size_read != chunk_len) {
                wiced_assert("During CLM download, resource_read() returned less of the file than should be available\n", 1 == 0);
                ret = WWD_CLM_BLOB_DLOAD_ERROR;
                break;
            }

            cumulative_len += chunk_len;

            if (datalen - chunk_len == 0)
                dl_flag |= DL_END;

            ret = wwd_download2dongle(WWD_STA_INTERFACE, IOVAR_STR_CLMLOAD, dl_flag, DL_TYPE_CLM,
                chunk_buf, data_offset + chunk_len);
            dl_flag &= (uint16_t)~DL_BEGIN;

            datalen = datalen - chunk_len;
        } while ((datalen > 0) && (ret == WWD_SUCCESS));

        free(chunk_buf);
        if ( ret != WWD_SUCCESS )
        {
            wwd_result_t ret_clmload_status;
            wiced_buffer_t buffer;
            wiced_buffer_t response;
            void *data;

            WPRINT_WWD_DEBUG(("clmload (%ld byte file) failed with return %d; ", clm_blob_size, ret));
            data = (int*)wwd_sdpcm_get_iovar_buffer( &buffer, 4, IOVAR_STR_CLMLOAD_STATUS );
            CHECK_IOCTL_BUFFER( data );
            ret_clmload_status = wwd_sdpcm_send_iovar( SDPCM_GET, buffer, &response, WWD_STA_INTERFACE );
            if ( ret_clmload_status != WWD_SUCCESS )
            {
                WPRINT_WWD_DEBUG(("clmload_status failed with return %d\n", ret_clmload_status));
            }
            else
            {
                uint32_t *clmload_status = (uint32_t *)host_buffer_get_current_piece_data_pointer( response );

                if ( clmload_status != NULL )
                {
                    WPRINT_WWD_DEBUG(("clmload_status is %lu\n", *clmload_status));
                    host_buffer_release( response, WWD_NETWORK_RX );
                }
            }
        }
    } else {
        ret = WWD_MALLOC_FAILURE;
    }

    return ret;
}
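
The loop above frames the transfer with the DL_BEGIN/DL_END flags: the first chunk carries DL_BEGIN, the final chunk carries DL_END, and a blob that fits in a single chunk carries both. The same framing in isolation, as a minimal sketch; the DL_* values are the ones used in the example, while send_chunk() and download_in_chunks() are hypothetical stand-ins for wwd_download2dongle() and its caller.

/* Sketch of the DL_BEGIN/DL_END framing used by wwd_process_clm_data().
 * send_chunk() is a hypothetical stand-in for wwd_download2dongle(). */
static wwd_result_t download_in_chunks( uint32_t total_len, uint32_t max_chunk )
{
    uint32_t     remaining = total_len;
    uint16_t     flag      = DL_BEGIN;
    wwd_result_t ret       = WWD_SUCCESS;

    while ( ( remaining > 0 ) && ( ret == WWD_SUCCESS ) )
    {
        uint32_t chunk = ( remaining >= max_chunk ) ? max_chunk : remaining;

        if ( remaining == chunk )
        {
            flag |= DL_END;                  /* last (possibly only) chunk */
        }

        ret = send_chunk( flag, chunk );     /* hypothetical helper */

        flag &= (uint16_t) ~DL_BEGIN;        /* only the first chunk carries DL_BEGIN */
        remaining -= chunk;
    }
    return ret;
}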
Example #4
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	ctx->needs_flush = true;

	/*
	 * Figure out the buffers/features we need:
	 */

	if (fd_depth_enabled(ctx)) {
		buffers |= FD_BUFFER_DEPTH;
		resource_written(ctx, pfb->zsbuf->texture);
		ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		buffers |= FD_BUFFER_STENCIL;
		resource_written(ctx, pfb->zsbuf->texture);
		ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		resource_written(ctx, surf);
		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (surf->nr_samples > 1)
			ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Skip over buffer 0, that is sent along with the command stream */
	for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
		resource_read(ctx, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
		resource_read(ctx, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	for (i = 0; i < ctx->vtx.vertexbuf.count; i++) {
		assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
		resource_read(ctx, ctx->vtx.vertexbuf.vb[i].buffer);
	}

	/* Mark index buffer as being read */
	resource_read(ctx, ctx->indexbuf.buffer);

	/* Mark textures as being read */
	for (i = 0; i < ctx->verttex.num_textures; i++)
		if (ctx->verttex.textures[i])
			resource_read(ctx, ctx->verttex.textures[i]->texture);
	for (i = 0; i < ctx->fragtex.num_textures; i++)
		if (ctx->fragtex.textures[i])
			resource_read(ctx, ctx->fragtex.textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(ctx, ctx->streamout.targets[i]->buffer);

	ctx->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
	/* and any buffers used, need to be resolved: */
	ctx->resolve |= buffers;

	DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
	ctx->draw_vbo(ctx, info);

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		ctx->dirty = 0xffffffff;

	/* if an app (or, well, piglit test) does many thousands of draws
	 * without flush (or anything which implicitly flushes, like
	 * changing render targets), we can exceed the ringbuffer size.
	 * Since we don't currently have a sane way to wrap around, and
	 * we use the same buffer for both draw and tiling commands, for
	 * now we need to do this hack and trigger flush if we are running
	 * low on remaining space for cmds:
	 */
	if (((ctx->ring->cur - ctx->ring->start) >
				(ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_context_render(pctx);
}
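
Note that resource_read()/resource_written() here are not the resource-loader API from the earlier examples; in this driver they are small local helpers that record which pipe_resource objects the draw will read or write, so the context can track dependencies and resolve the right buffers. A minimal sketch of that idea, assuming a per-resource status bitfield; the FD_PENDING_* names and the field used are assumptions for illustration, not the exact Mesa code.

/* Sketch only: record how the current draw uses a resource.
 * Enum values and struct fields are assumptions, not the exact freedreno code. */
enum fd_pending_use {
	FD_PENDING_READ  = 0x1,
	FD_PENDING_WRITE = 0x2,
};

static void
resource_used(struct fd_context *ctx, struct pipe_resource *prsc, enum fd_pending_use use)
{
	struct fd_resource *rsc;

	if (!prsc)
		return;

	rsc = fd_resource(prsc);
	rsc->status |= use;   /* hypothetical bookkeeping field */
}

static void
resource_read(struct fd_context *ctx, struct pipe_resource *prsc)
{
	resource_used(ctx, prsc, FD_PENDING_READ);
}

static void
resource_written(struct fd_context *ctx, struct pipe_resource *prsc)
{
	resource_used(ctx, prsc, FD_PENDING_WRITE);
}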