/* Release every refcount entry registered under 'id'.
 *
 * For each live entry (data != mmgr_NULL) whose id matches, the buffer is
 * released either through the caller-provided clear_func or, when none is
 * given, directly via mmgr_free().  The entry administration is then reset.
 *
 * Fix: the original asserted clear_func != NULL up front, which made the
 * explicitly-coded mmgr_free() fallback branch unreachable; the contradictory
 * assert is removed so both paths behave as the code documents.
 */
void sh_css_refcount_clear(int32_t id, void (*clear_func)(hrt_vaddress ptr))
{
	struct sh_css_refcount_entry *entry;
	uint32_t i;
	uint32_t count = 0;

	sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_clear(%x)\n", id);
	for (i = 0; i < myrefcount.size; i++) {
		entry = &myrefcount.items[i];
		if ((entry->data != mmgr_NULL) && (entry->id == id)) {
			sh_css_dtrace(SH_DBG_TRACE,
				"sh_css_refcount_clear:"
				" %x: 0x%x\n", id, entry->data);
			if (clear_func) {
				/* clear using provided function */
				clear_func(entry->data);
			} else {
				sh_css_dtrace(SH_DBG_TRACE,
					"sh_css_refcount_clear: "
					"using mmgr_free: no clear_func\n");
				mmgr_free(entry->data);
			}
			/* the release path must have dropped the count to
			 * zero before the entry may be recycled */
			assert_exit(entry->count == 0);
			entry->data = mmgr_NULL;
			entry->count = 0;
			entry->id = 0;
			count++;
		}
	}
	sh_css_dtrace(SH_DBG_TRACE,
		"sh_css_refcount_clear(%x): cleared %d\n", id, count);
}
/* Convert the current (full-resolution) shading table for the given binary
 * and copy the resulting ISP-layout table into the ISP buffer at isp_sc_tbl.
 * The temporary ISP-side table is freed before returning.  Always returns
 * true. */
bool sh_css_param_shading_table_store(
	hrt_vaddress isp_sc_tbl,
	unsigned int sensor_binning,
	bool raw_binning,
	const struct sh_css_binary *binary)
{
	struct sh_css_shading_table_isp *isp_table;

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_param_shading_table_store() enter:\n");

	/* shading table is full resolution, reduce */
	sh_css_param_shading_table_prepare(sc_table, sensor_binning,
		raw_binning, &isp_table, binary);

	mmgr_store(isp_sc_tbl,
		sh_table_entry(isp_table, 0, 0, 0),
		SH_CSS_SC_NUM_COLORS * isp_table->height *
		isp_table->stride * sizeof(short));

	/* the ISP-layout copy lives in the ISP buffer now; drop the host copy */
	sh_css_free(isp_table->data);
	sh_css_free(isp_table);

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_param_shading_table_store() leave:\n");
	return true;
}
hrt_vaddress sh_css_refcount_alloc( int32_t id, const size_t size, const uint16_t attribute) { hrt_vaddress ptr; struct sh_css_refcount_entry *entry = NULL; uint32_t i; assert(size > 0); assert(id != FREE_BUF_CACHE); for (i = 0; i < myrefcount.size; i++) { entry = &myrefcount.items[i]; if ((entry->id == FREE_BUF_CACHE) && (entry->size == size)) { entry->id = id; assert(entry->count == 0); entry->count = 1; assert(entry->data != mmgr_NULL); if (attribute & MMGR_ATTRIBUTE_CLEARED) mmgr_clear(entry->data, size); sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_refcount_alloc(%x) 0x%x " "reused from cache, refcnt %d\n", id, entry->data, entry->count); return entry->data; } } ptr = mmgr_alloc_attr(size, attribute); assert(ptr != mmgr_NULL); /* This address should not exist in the administration yet */ assert(!find_entry(ptr)); entry = find_free_entry(ptr); assert(entry != NULL); if (entry == NULL) return mmgr_NULL; assert(entry->data == mmgr_NULL); entry->id = id; entry->data = ptr; entry->size = size; entry->count = 1; sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_refcount_alloc(%x) 0x%x " "new alloc refcnt %d\n", id, ptr, entry->count); return ptr; }
/* Take a reference on ptr under owner 'id'.  If ptr is not yet tracked, a
 * fresh administration entry is claimed for it.  Returns ptr on success, or
 * mmgr_NULL when no entry is available, the entry belongs to another owner,
 * or it is already bound to a different address. */
hrt_vaddress sh_css_refcount_retain(int32_t id, hrt_vaddress ptr)
{
	struct sh_css_refcount_entry *ent = find_entry(ptr, false);

	sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_retain(%x) 0x%x\n",
		id, ptr);

	if (ent == NULL) {
		/* not tracked yet: claim a free entry for this owner */
		ent = find_entry(ptr, true);
		if (ent == NULL)
			return mmgr_NULL;
		ent->id = id;
	}

	assert_exit_code(ent->id == id, mmgr_NULL);

	if (ent->data == ptr) {
		/* additional reference on a known buffer */
		ent->count += 1;
	} else if (ent->data == mmgr_NULL) {
		/* first reference through a freshly claimed entry */
		ent->data = ptr;
		ent->count = 1;
	} else {
		/* entry is bound to a different address */
		return mmgr_NULL;
	}
	return ptr;
}
/* Drop one reference held by 'id' on ptr; the buffer is freed via
 * mmgr_free() when the count reaches zero.  Returns true on success, false
 * when ptr is mmgr_NULL or not managed by the administration (asserted, as
 * that indicates a caller bug). */
bool sh_css_refcount_release(int32_t id, hrt_vaddress ptr)
{
	struct sh_css_refcount_entry *ent;

	sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_release(%x) 0x%x\n",
		id, ptr);

	if (ptr == mmgr_NULL)
		return false;

	ent = find_entry(ptr, false);
	if (ent != NULL) {
		assert_exit_code(ent->id == id, false);
		if (ent->count > 0) {
			ent->count -= 1;
			if (ent->count == 0) {
				/* last reference gone: free and reset */
				mmgr_free(ptr);
				ent->data = mmgr_NULL;
				ent->id = 0;
			}
			return true;
		}
	}

	/* SHOULD NOT HAPPEN: ptr not managed by refcount, or not
	   valid anymore */
	assert_exit_code(false, false);
	return false;
}
enum ia_css_err ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe, unsigned int or_mask, unsigned int and_mask) { const struct ia_css_fw_info *fw = &sh_css_sp_fw; unsigned int HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com; unsigned int offset; struct sh_css_event_irq_mask event_irq_mask; (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ sh_css_dtrace(SH_DBG_TRACE, "ia_css_pipe_set_irq_mask(" "or_mask=%x, and_mask=%x)\n", or_mask, and_mask); assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES); /*assert(or_mask <= UINT16_MAX);*/ /*assert(and_mask <= UINT16_MAX);*/ event_irq_mask.or_mask = (uint16_t)or_mask; event_irq_mask.and_mask = (uint16_t)and_mask; offset = offsetof(struct host_sp_communication, host2sp_event_irq_mask[ia_css_pipe_get_pipe_num(pipe)]); assert(offset % HRT_BUS_BYTES == 0); sp_dmem_store(SP0_ID, (unsigned int)sp_address_of(host_sp_com) + offset, &event_irq_mask, sizeof(event_irq_mask)); return IA_CSS_SUCCESS; }
enum ia_css_err ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe, unsigned int *or_mask, unsigned int *and_mask) { const struct ia_css_fw_info *fw = &sh_css_sp_fw; unsigned int HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com; unsigned int offset; struct sh_css_event_irq_mask event_irq_mask; (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ sh_css_dtrace(SH_DBG_TRACE, "ia_css_event_get_irq_mask()\n"); assert(pipe); assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES); offset = offsetof(struct host_sp_communication, host2sp_event_irq_mask[ia_css_pipe_get_pipe_num(pipe)]); assert(offset % HRT_BUS_BYTES == 0); sp_dmem_load(SP0_ID, (unsigned int)sp_address_of(host_sp_com) + offset, &event_irq_mask, sizeof(event_irq_mask)); if (or_mask) *or_mask = event_irq_mask.or_mask; if (and_mask) *and_mask = event_irq_mask.and_mask; return IA_CSS_SUCCESS; }
/* Install a new shading table pointer; raises the sc_table_changed flag
 * when the pointer differs from the currently installed one.  Returns the
 * (possibly already pending) changed flag. */
bool sh_css_param_shading_table_set(
	const struct sh_css_shading_table *table)
{
	/* input can be NULL ?? */
	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_params_shading_table_set() enter:\n");

	if (sc_table != table) {
		sc_table = table;
		sc_table_changed = true;
	}

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_params_shading_table_set() leave:\n");
	return sc_table_changed;
}
/* On-target test helper: acquire a resource of 'resource_type' and verify
 * the outcome.
 * - shouldfail == false: the acquire must succeed and yield 'expected';
 *   a mismatch hangs in an endless loop, a deliberate "stop here" marker
 *   on targets without a test framework.
 * - shouldfail == true: the acquire is expected to report
 *   IA_CSS_ERR_RESOURCE_EXHAUSTED; any other result hangs as well.
 * NOTE(review): the "Succes"/"aquire" misspellings are in the function
 * name and runtime trace strings and are intentionally left untouched. */
void check_aquire(ia_css_resource_type_t resource_type, uint32_t* resource_id,uint32_t expected, bool shouldfail)
{
	enum ia_css_err rval = 0;

	if (!shouldfail) {
		rval = ia_css_i_host_rmgr_acq_gen(resource_type, resource_id);
		if(rval !=IA_CSS_SUCCESS)
			sh_css_dtrace(SH_DBG_ERROR, "Failed rval = %d\n", rval);
		/* wrong id acquired: hang so the failure is visible on target */
		if (*resource_id != expected)
			while(1);//sh_css_dtrace(SH_DBG_ERROR,"Failed: rval = %d ... expected = %d\n",*resource_id, expected);
		else
			sh_css_dtrace(SH_DBG_ERROR, "Succes: resourcetype = %x - Resource %d acquired\n", resource_type, *resource_id);
	} else {
		/* expect exhaustion; anything else hangs as a test failure */
		if (IA_CSS_ERR_RESOURCE_EXHAUSTED == ia_css_i_host_rmgr_acq_gen(resource_type, resource_id))
			sh_css_dtrace(SH_DBG_ERROR, "Succes: resource exhausted\n");
		else
			while(1);// sh_css_dtrace(SH_DBG_ERROR, "Failed: error not what expected\n");
	}
}
/* Drop one reference held by 'id' on ptr.  When the count reaches zero the
 * buffer is NOT freed but returned to the allocation cache by re-tagging
 * the entry as FREE_BUF_CACHE (see sh_css_refcount_alloc()).  Returns true
 * on success, false when ptr is unmanaged (asserted: caller bug). */
bool sh_css_refcount_release(int32_t id, hrt_vaddress ptr)
{
	struct sh_css_refcount_entry *ent;

	assert(id != FREE_BUF_CACHE);

	if (ptr == mmgr_NULL)
		return false;

	ent = find_entry(ptr);
	if (ent != NULL) {
		assert(ent->id == id);
		if (ent->count > 0) {
			ent->count -= 1;
			if (ent->count == 0) {
				/* recycle instead of freeing */
				ent->id = FREE_BUF_CACHE;
				sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
					"sh_css_refcount_release(%x) 0x%x "
					"new refcnt 0, returned to cache\n",
					id, ptr);
			} else {
				sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
					"sh_css_refcount_release(%x) 0x%x "
					"new refcnt %d\n",
					id, ptr, ent->count);
			}
			return true;
		}
	}

	/* SHOULD NOT HAPPEN: ptr not managed by refcount, or not
	   valid anymore */
	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_refcount_release(%x) 0x%x ERROR not managed\n",
		id, ptr);
	assert(false);
	return false;
}
void sh_css_refcount_uninit(void) { struct sh_css_refcount_entry *entry; uint32_t i; sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_uninit() enter\n"); for (i = 0; i < myrefcount.size; i++) { entry = &myrefcount.items[i]; if (entry->data != mmgr_NULL) { /* sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_uninit: freeing (%x)\n", entry->data);*/ mmgr_free(entry->data); entry->data = mmgr_NULL; entry->count = 0; entry->id = 0; } } sh_css_free(myrefcount.items); myrefcount.items = NULL; myrefcount.size = 0; sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_uninit() leave\n"); }
/* Release a shading table allocated by sh_css_shading_table_alloc(),
 * including its per-color data planes.  A NULL table is a no-op. */
void sh_css_shading_table_free(struct sh_css_shading_table *table)
{
	unsigned int color;

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_shading_table_free() enter:\n");

	if (table == NULL) {
		sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
			"sh_css_shading_table_free() leave:\n");
		return;
	}

	for (color = 0; color < SH_CSS_SC_NUM_COLORS; color++) {
		/* planes may be unset after a partial allocation */
		if (table->data[color] != NULL)
			sh_css_free(table->data[color]);
	}
	sh_css_free(table);

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_shading_table_free() leave:\n");
}
struct sh_css_shading_table * sh_css_shading_table_alloc( unsigned int width, unsigned int height) { unsigned int i; struct sh_css_shading_table *me = sh_css_malloc(sizeof(*me)); sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_shading_table_alloc() enter:\n"); if (me == NULL) { /* Checkpatch patch */ return me; } me->width = width; me->height = height; me->sensor_width = 0; me->sensor_height = 0; me->fraction_bits = 0; for (i = 0; i < SH_CSS_SC_NUM_COLORS; i++) { me->data[i] = sh_css_malloc(width * height * sizeof(*me->data[0])); if (me->data[i] == NULL) { unsigned int j; for (j = 0; j < i; j++) sh_css_free(me->data[j]); sh_css_free(me); return NULL; } } sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_shading_table_alloc() leave:\n"); return me; }
/* Release every live refcount entry registered under 'id'.
 *
 * Two release modes:
 * - with clear_func: the callback both frees/recycles the buffer AND
 *   updates the entry administration itself, so this function must not
 *   touch the entry fields afterwards (see inline comment).
 * - without clear_func: fall back to mmgr_free() and reset the entry
 *   administration here.
 * Both paths assert that the refcount already dropped to zero. */
void sh_css_refcount_clear(int32_t id, void (*clear_func)(hrt_vaddress ptr))
{
	struct sh_css_refcount_entry *entry;
	uint32_t i;
	uint32_t count = 0;	/* number of entries cleared, for tracing */

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_refcount_clear(%x)\n",
		id);
	for (i = 0; i < myrefcount.size; i++) {
		entry = &myrefcount.items[i];
		if ((entry->data != mmgr_NULL) && (entry->id == id)) {
			sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
				"sh_css_refcount_clear: %x: 0x%x refcnt %d\n",
				id, entry->data, entry->count);
			if (clear_func) {
				/* clear using provided function */
				/* This function will update the entry */
				/* administration (we should not do that) */
				clear_func(entry->data);
				assert(entry->count == 0);
			} else {
				sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
					"sh_css_refcount_clear: "
					"using default mmgr_free\n");
				mmgr_free(entry->data);
				assert(entry->count == 0);
				/* reset administration ourselves on this
				 * path only */
				entry->data = mmgr_NULL;
				entry->size = 0;
				entry->count = 0;
				entry->id = 0;
			}
			count++;
		}
	}
	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_refcount_clear(%x): cleared %d\n", id, count);
}
/* On-target test helper: release 'resource_id' and verify the outcome.
 * - shouldfail == false: the release must succeed; any error hangs in an
 *   endless loop (deliberate "stop here" marker on target).
 * - shouldfail == true: a RESOURCE_NOT_AVAILABLE error is the expected,
 *   "successful" outcome and is traced; other results pass silently.
 * NOTE(review): the "avaliable" typo is in a runtime trace string and is
 * intentionally left untouched. */
void check_release(ia_css_resource_type_t resource_type, uint32_t resource_id, bool shouldfail)
{
	enum ia_css_err rval = 0;

	if (!shouldfail) {
		rval = ia_css_i_host_rmgr_rel_gen(resource_type, resource_id);
		/* unexpected failure: hang so it is visible on target */
		if(rval !=IA_CSS_SUCCESS)
			while(1);//sh_css_dtrace(SH_DBG_ERROR, "Failed rval = %d\n", rval);
	} else {
		rval = ia_css_i_host_rmgr_rel_gen(resource_type, resource_id);
		if(rval == IA_CSS_ERR_RESOURCE_NOT_AVAILABLE)
			sh_css_dtrace(SH_DBG_ERROR, "Success: resource not avaliable\n");
	}
}
/* Free a DVS 6-axis configuration and its four coordinate buffers, then
 * NULL the caller's pointer.  Safe on NULL or already-freed input.
 *
 * Fix: the leave-trace used to dereference *dvs_6axis_config even when
 * dvs_6axis_config itself was NULL — undefined behavior; it now only
 * dereferences a valid pointer.
 */
void free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
{
	if ((dvs_6axis_config != NULL) && (*dvs_6axis_config != NULL)) {
		sh_css_dtrace(SH_DBG_TRACE,
			"-> free_dvs_6axis_table dvs_6axis_config %p\n",
			(*dvs_6axis_config));

		if ((*dvs_6axis_config)->xcoords_y != NULL) {
			sh_css_free((*dvs_6axis_config)->xcoords_y);
			(*dvs_6axis_config)->xcoords_y = NULL;
		}
		if ((*dvs_6axis_config)->ycoords_y != NULL) {
			sh_css_free((*dvs_6axis_config)->ycoords_y);
			(*dvs_6axis_config)->ycoords_y = NULL;
		}

		/* Free up UV buffers */
		if ((*dvs_6axis_config)->xcoords_uv != NULL) {
			sh_css_free((*dvs_6axis_config)->xcoords_uv);
			(*dvs_6axis_config)->xcoords_uv = NULL;
		}
		if ((*dvs_6axis_config)->ycoords_uv != NULL) {
			sh_css_free((*dvs_6axis_config)->ycoords_uv);
			(*dvs_6axis_config)->ycoords_uv = NULL;
		}

		sh_css_free(*dvs_6axis_config);
		*dvs_6axis_config = NULL;
	}
	sh_css_dtrace(SH_DBG_TRACE,
		"<- free_dvs_6axis_table dvs_6axis_config %p\n",
		dvs_6axis_config ? (*dvs_6axis_config) : NULL);
}
static enum ia_css_err set_ref_extra_frame_buffer(const struct ia_css_frame *frame, unsigned pipe_num, unsigned stage_num) { sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "set_ref_extra_frame_buffer() %08x\n", frame); if (frame == NULL) return IA_CSS_ERR_INVALID_ARGUMENTS; if (frame->info.format != IA_CSS_FRAME_FORMAT_YUV420) return IA_CSS_ERR_INVALID_ARGUMENTS; sh_css_copy_frame_to_spframe(NULL, frame, pipe_num, stage_num, sh_css_frame_ref_extra); return IA_CSS_SUCCESS; }
/* Take an additional reference on an already-registered buffer.  The entry
 * must exist and match both 'id' and 'ptr' (asserted).  Returns ptr, or
 * mmgr_NULL when the buffer is not tracked. */
hrt_vaddress sh_css_refcount_retain(int32_t id, hrt_vaddress ptr)
{
	struct sh_css_refcount_entry *ent;

	assert(id != FREE_BUF_CACHE);
	assert(ptr != mmgr_NULL);

	ent = find_entry(ptr);
	assert(ent != NULL);
	if (ent == NULL)
		return mmgr_NULL;

	/* the entry must already be bound to this owner and address */
	assert(ent->id == id);
	assert(ent->data == ptr);

	ent->count += 1;
	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_refcount_retain(%x) 0x%x new refcnt %d\n",
		id, ptr, ent->count);
	return ptr;
}
/* Send a frame of data into the input network via the GP FIFO.
 * Parameters:
 * - data: array of 16 bit values that contains all data for the frame.
 * - width: width of a line in number of subpixels, for yuv420 it is the
 *          number of Y components per line.
 * - height: height of the frame in number of lines.
 * - ch_id: channel ID.
 * - fmt_type: format type.
 * - hblank_cycles: length of horizontal blanking in cycles.
 * - marker_cycles: number of empty cycles after start-of-line and before
 *                  end-of-frame.
 * - two_ppc: boolean, describes whether to send one or two pixels per clock
 *            cycle. In this mode, we sent pixels N and N+1 in the same cycle,
 *            to IF_PRIM_A and IF_PRIM_B respectively. The caller must make
 *            sure the input data has been formatted correctly for this.
 *            For example, for RGB formats this means that unused values
 *            must be inserted.
 * - yuv420: boolean, describes whether (non-legacy) yuv420 data is used. In
 *           this mode, the odd lines (1,3,5 etc) are half as long as the
 *           even lines (2,4,6 etc).
 *           Note that the first line is odd (1) and the second line is even
 *           (2).
 *
 * This function does not do any reordering of pixels, the caller must make
 * sure the data is in the right format. Please refer to the CSS receiver
 * documentation for details on the data formats.
 */
static void sh_css_hrt_s2m_send_frame(
	unsigned short *data,
	unsigned int width,
	unsigned int height,
	unsigned int ch_id,
	unsigned int fmt_type,
	unsigned int hblank_cycles,
	unsigned int marker_cycles,
	unsigned int two_ppc,
	enum sh_css_mipi_data_type type)
{
	unsigned int i;	/* 0-based line counter; 1-based line number is i+1 */

	sh_css_hrt_s2m_start_frame(ch_id, fmt_type);
	for (i = 0; i < height; i++) {
		sh_css_dtrace(2, "%s: sending line %d|%d \n", __func__, i,
			height);
#if defined(__KERNEL__)
		/* add some delay to avoid FIFO overflow*/
		usleep_range(1000, 1500);
#endif
		/* For yuv420, (i & 1) == 1 selects the 1-based EVEN lines
		 * (2, 4, ...), which per the header comment are twice as
		 * long as the odd lines; send and advance 2*width there. */
		if ((type == sh_css_mipi_data_type_yuv420) &&
		    (i & 1) == 1) {
			sh_css_hrt_s2m_send_line(data, 2 * width,
				hblank_cycles, marker_cycles, two_ppc, type);
			data += 2 * width;
		} else {
			sh_css_hrt_s2m_send_line(data, width,
				hblank_cycles, marker_cycles, two_ppc, type);
			data += width;
		}
	}
	sh_css_hrt_s2m_end_frame(marker_cycles);
	return;
}
struct sh_css_shading_table * sh_css_param_shading_table_get( unsigned int sensor_binning, bool raw_binning) { struct sh_css_shading_table *sc_table_css; struct sh_css_shading_table_isp *tmp_sc_table_isp; struct sh_css_binary *binary = NULL; unsigned num_pipes, p, l; sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_param_shading_table_get() enter:\n"); sh_css_pipeline_stream_get_num_pipelines(&num_pipes); for (p = 0; p < num_pipes; p++) { struct sh_css_pipeline *pipeline; struct sh_css_pipeline_stage *stage; unsigned int thread_id; sh_css_pipeline_stream_get_pipeline(p, &pipeline); assert(pipeline != NULL); sh_css_query_sp_thread_id(pipeline->pipe_id, &thread_id); for (stage = pipeline->stages; stage; stage = stage->next) { if (stage && stage->binary) { if (stage->binary->info->enable.sc) { binary = stage->binary; break; } } } if (binary) break; } if (binary) sh_css_param_shading_table_prepare( (const struct sh_css_shading_table *)sc_table, sensor_binning, raw_binning, &tmp_sc_table_isp, binary); sc_table_css = sh_css_shading_table_alloc( binary->sctbl_width_per_color, binary->sctbl_height); sc_table_css->sensor_width = tmp_sc_table_isp->sensor_width; sc_table_css->sensor_height = tmp_sc_table_isp->sensor_height; sc_table_css->width = tmp_sc_table_isp->width; sc_table_css->height = tmp_sc_table_isp->height; sc_table_css->fraction_bits = tmp_sc_table_isp->fraction_bits; /* Copy + reformat shading table data from ISP to CSS data structure */ for (l = 0; l < sc_table_css->height; l++) { unsigned int c; for (c = 0; c < SH_CSS_SC_NUM_COLORS; c++) { memcpy(&sc_table_css->data[c][l*sc_table_css->width], sh_table_entry(tmp_sc_table_isp, c, l, 0), sc_table_css->width * sizeof(short)); } } /* Free the isp shading table in HMM */ sh_css_free(tmp_sc_table_isp->data); sh_css_free(tmp_sc_table_isp); sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_param_shading_table_get() leave:\n"); return sc_table_css; }
struct ia_css_dvs_6axis_config * generate_dvs_6axis_table(const struct ia_css_resolution *frame_res, const struct ia_css_resolution *dvs_offset) { unsigned int x,y; unsigned int width_y; unsigned int height_y; unsigned int width_uv; unsigned int height_uv; enum ia_css_err err = IA_CSS_SUCCESS; struct ia_css_dvs_6axis_config *dvs_config = NULL; dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_malloc(sizeof(struct ia_css_dvs_6axis_config)); if(dvs_config == NULL) { sh_css_dtrace(SH_DBG_TRACE, "out of memory\n"); err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; } else { /*Initialize new struct with latest config settings*/ dvs_config->width_y = width_y = DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width); dvs_config->height_y = height_y = DVS_TABLE_IN_BLOCKDIM_Y_LUMA(frame_res->height); dvs_config->width_uv = width_uv = DVS_TABLE_IN_BLOCKDIM_X_CHROMA(frame_res->width / 2); /* UV = Y/2, depens on colour format YUV 4.2.0*/ dvs_config->height_uv = height_uv = DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(frame_res->height / 2);/* UV = Y/2, depens on colour format YUV 4.2.0*/ sh_css_dtrace(SH_DBG_TRACE, "generate_dvs_6axis_table: Env_X %d Env_Y %d\n",dvs_offset->width,dvs_offset->height); sh_css_dtrace(SH_DBG_TRACE, "generate_dvs_6axis_table Y: W %d H %d\n",width_y,height_y); /* Generate Y buffers */ dvs_config->xcoords_y = (uint32_t *)sh_css_malloc( width_y * height_y * sizeof(uint32_t)); if(dvs_config->xcoords_y == NULL) { sh_css_dtrace(SH_DBG_TRACE, "out of memory\n"); err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; } else { for(y = 0; y < height_y; y++) { for(x=0;x<width_y;x++) { dvs_config->xcoords_y[y*width_y + x] = ( ( dvs_offset->width + x*DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS ); } } } dvs_config->ycoords_y = (uint32_t *)sh_css_malloc( width_y * height_y * sizeof(uint32_t)); if(dvs_config->ycoords_y == NULL) { sh_css_dtrace(SH_DBG_TRACE, "out of memory\n"); err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; } else { for(y = 0; y < height_y; y++) { for(x=0;x<width_y;x++) { 
dvs_config->ycoords_y[y*width_y + x] = ( ( dvs_offset->height + y*DVS_BLOCKDIM_Y_LUMA) << DVS_COORD_FRAC_BITS ); } } } /* Generate UV buffers */ sh_css_dtrace(SH_DBG_TRACE, "generate_dvs_6axis_table UV W %d H %d\n",width_uv,height_uv); dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc( width_uv * height_uv * sizeof(uint32_t)); if(dvs_config->xcoords_uv == NULL) { sh_css_dtrace(SH_DBG_TRACE, "out of memory\n"); err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; } else { for(y = 0; y < height_uv; y++) { for(x=0;x<width_uv;x++) { /* Envelope dimesions set in Ypixels hence offset UV = offset Y/2 */ dvs_config->xcoords_uv[y*width_uv + x] = ( ( (dvs_offset->width / 2) + x*DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS ); } } } dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc( width_uv * height_uv * sizeof(uint32_t)); if(dvs_config->ycoords_uv == NULL) { sh_css_dtrace(SH_DBG_TRACE, "out of memory\n"); err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; } else { for(y = 0; y < height_uv; y++) { for(x=0;x<width_uv;x++) { /* Envelope dimesions set in Ypixels hence offset UV = offset Y/2 */ dvs_config->ycoords_uv[y*width_uv + x] = ( ( (dvs_offset->height / 2) + y*DVS_BLOCKDIM_Y_CHROMA) << DVS_COORD_FRAC_BITS ); } } } #if 0 for(y = 0; y < height_y; y++) { for(x=0;x<width_y;x++) sh_css_dtrace(SH_DBG_TRACE, "xcoords_y: %d \n",dvs_config->xcoords_y[y*width_y + x]); sh_css_dtrace(SH_DBG_TRACE, "\n"); } for(y = 0; y < height_y; y++) { for(x=0;x<width_y;x++) sh_css_dtrace(SH_DBG_TRACE, "ycoords_y: %d \n",dvs_config->ycoords_y[y*width_y + x]); sh_css_dtrace(SH_DBG_TRACE, "\n"); } for(y = 0; y < height_y; y++) { for(x=0;x<width_uv;x++) sh_css_dtrace(SH_DBG_TRACE, "xcoords_uv: %d \n",dvs_config->xcoords_uv[y*width_uv + x]); sh_css_dtrace(SH_DBG_TRACE, "\n"); } for(y = 0; y < height_uv; y++) { for(x=0;x<width_uv;x++) sh_css_dtrace(SH_DBG_TRACE, "ycoords_uv: %d \n",dvs_config->ycoords_uv[y*width_uv + x]); sh_css_dtrace(SH_DBG_TRACE, "\n"); } #endif if (err != IA_CSS_SUCCESS) { sh_css_dtrace(SH_DBG_TRACE, 
"generate_dvs_6axis_table: err %d\n, leave() ",(int)err); dvs_config = NULL; } else { sh_css_dtrace(SH_DBG_TRACE, "generate_dvs_6axis_table leave() , dvs_config %p\n",dvs_config); } } return dvs_config; }
/* Publish a host frame to the SP stage administration and, when
 * sp_frame_out is given, fill in the SP-side frame descriptor (info plus
 * the per-format plane offsets).
 *
 * static_frame_data[id] is overloaded: for frames with a dynamic data
 * index (>= 0) it stores that small index, otherwise it stores the frame's
 * static buffer address.  init_frame_pointers() on the SP resolves the
 * difference. */
static void sh_css_copy_frame_to_spframe(struct sh_css_sp_frame *sp_frame_out,
	const struct ia_css_frame *frame_in,
	unsigned pipe_num, unsigned stage_num,
	enum sh_css_frame_id id)
{
	/* TODO: remove pipe and stage from interface */
	(void)pipe_num;
	(void)stage_num;

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_copy_frame_to_spframe frame id %d ptr 0x%08x\n", id,
		sh_css_sp_stage.frames.static_frame_data[id]);

	if (frame_in->dynamic_data_index >= 0) {
		/* only the in/out/vf-out frames may be dynamic */
		assert((id == sh_css_frame_in) ||
		       (id == sh_css_frame_out) ||
		       (id == sh_css_frame_out_vf));
		/*
		 * value >=0 indicates that function init_frame_pointers()
		 * should use the dynamic data address
		 */
		assert(frame_in->dynamic_data_index <
		       SH_CSS_NUM_DYNAMIC_FRAME_IDS);
		/*
		 * static_frame_data is overloaded, small values (<3) are
		 * the dynamic index, large values are the static address
		 */
		sh_css_sp_stage.frames.static_frame_data[id] =
			frame_in->dynamic_data_index;
	} else {
		sh_css_sp_stage.frames.static_frame_data[id] = frame_in->data;
	}

	if (!sp_frame_out)
		return;

	sh_css_frame_info_to_sp(&sp_frame_out->info, &frame_in->info);

	/* copy the plane offsets appropriate for this frame format */
	switch (frame_in->info.format) {
	case IA_CSS_FRAME_FORMAT_RAW:
		sp_frame_out->planes.raw.offset =
			frame_in->planes.raw.offset;
		break;
	case IA_CSS_FRAME_FORMAT_RGB565:
	case IA_CSS_FRAME_FORMAT_RGBA888:
		sp_frame_out->planes.rgb.offset =
			frame_in->planes.rgb.offset;
		break;
	case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
		sp_frame_out->planes.planar_rgb.r.offset =
			frame_in->planes.planar_rgb.r.offset;
		sp_frame_out->planes.planar_rgb.g.offset =
			frame_in->planes.planar_rgb.g.offset;
		sp_frame_out->planes.planar_rgb.b.offset =
			frame_in->planes.planar_rgb.b.offset;
		break;
	case IA_CSS_FRAME_FORMAT_YUYV:
	case IA_CSS_FRAME_FORMAT_UYVY:
	case IA_CSS_FRAME_FORMAT_YUV_LINE:
		sp_frame_out->planes.yuyv.offset =
			frame_in->planes.yuyv.offset;
		break;
	case IA_CSS_FRAME_FORMAT_NV11:
	case IA_CSS_FRAME_FORMAT_NV12:
	case IA_CSS_FRAME_FORMAT_NV21:
	case IA_CSS_FRAME_FORMAT_NV16:
	case IA_CSS_FRAME_FORMAT_NV61:
		sp_frame_out->planes.nv.y.offset =
			frame_in->planes.nv.y.offset;
		sp_frame_out->planes.nv.uv.offset =
			frame_in->planes.nv.uv.offset;
		break;
	case IA_CSS_FRAME_FORMAT_YUV420:
	case IA_CSS_FRAME_FORMAT_YUV422:
	case IA_CSS_FRAME_FORMAT_YUV444:
	case IA_CSS_FRAME_FORMAT_YUV420_16:
	case IA_CSS_FRAME_FORMAT_YUV422_16:
	case IA_CSS_FRAME_FORMAT_YV12:
	case IA_CSS_FRAME_FORMAT_YV16:
		sp_frame_out->planes.yuv.y.offset =
			frame_in->planes.yuv.y.offset;
		sp_frame_out->planes.yuv.u.offset =
			frame_in->planes.yuv.u.offset;
		sp_frame_out->planes.yuv.v.offset =
			frame_in->planes.yuv.v.offset;
		break;
	case IA_CSS_FRAME_FORMAT_QPLANE6:
		sp_frame_out->planes.plane6.r.offset =
			frame_in->planes.plane6.r.offset;
		sp_frame_out->planes.plane6.r_at_b.offset =
			frame_in->planes.plane6.r_at_b.offset;
		sp_frame_out->planes.plane6.gr.offset =
			frame_in->planes.plane6.gr.offset;
		sp_frame_out->planes.plane6.gb.offset =
			frame_in->planes.plane6.gb.offset;
		sp_frame_out->planes.plane6.b.offset =
			frame_in->planes.plane6.b.offset;
		sp_frame_out->planes.plane6.b_at_r.offset =
			frame_in->planes.plane6.b_at_r.offset;
		break;
	case IA_CSS_FRAME_FORMAT_BINARY_8:
		sp_frame_out->planes.binary.data.offset =
			frame_in->planes.binary.data.offset;
		break;
	default:
		/* This should not happen, but in case it does,
		 * nullify the planes
		 */
		memset(&sp_frame_out->planes, 0,
		       sizeof(sp_frame_out->planes));
		break;
	}
}
/* Populate the global sh_css_sp_stage / sh_css_isp_stage structures for one
 * pipeline stage and store them to SP DMEM via store_sp_stage_data().
 * A NULL binary info marks an empty slot (sp_stage_addr set to mmgr_NULL).
 * Returns sh_css_success, or the error from
 * sh_css_sp_write_frame_pointers(). */
static enum sh_css_err
sh_css_sp_init_stage(struct sh_css_binary *binary,
		    const char *binary_name,
		    const struct sh_css_blob_info *blob_info,
		    const struct sh_css_binary_args *args,
		    enum sh_css_pipe_id id,
		    unsigned stage,
		    bool preview_mode,
		    bool low_light,
		    bool xnr,
		    unsigned irq_buf_flags,
		    const struct sh_css_hmm_isp_interface *isp_mem_if)
{
	const struct sh_css_binary_info *info = binary->info;
	enum sh_css_err err = sh_css_success;
	int i;
	enum sh_css_pipe_id pipe_id = id;
	unsigned int thread_id;
	bool continuous = sh_css_continuous_is_enabled();

	{
		/**
		 * Clear sh_css_sp_stage for easy debugging.
		 * program_input_circuit must be saved as it is set outside
		 * this function.
		 */
		unsigned int program_input_circuit;
		program_input_circuit = sh_css_sp_stage.program_input_circuit;
		memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
		sh_css_sp_stage.program_input_circuit = program_input_circuit;
	}

	sh_css_query_sp_thread_id(pipe_id, &thread_id);
	sh_css_sp_group.pipe[thread_id].num_stages++;

	if (info == NULL) {
		/* empty stage slot: record a NULL stage address and stop */
		sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] =
			mmgr_NULL;
		return sh_css_success;
	}

	/* only the first stage of a continuous pipe sees deinterleaved input */
	sh_css_sp_stage.deinterleaved =
		stage == 0 && sh_css_continuous_is_enabled();

	/*
	 * TODO: Make the Host dynamically determine
	 * the stage type.
	 */
	sh_css_sp_stage.stage_type = SH_CSS_ISP_STAGE_TYPE;
	sh_css_sp_stage.num = stage;
	sh_css_sp_stage.isp_online = binary && binary->online;
	sh_css_sp_stage.isp_copy_vf = args->copy_vf;
	sh_css_sp_stage.isp_copy_output = args->copy_output;
	/* These flags wil be set from the css top level */
	sh_css_sp_stage.irq_buf_flags = irq_buf_flags;

	/* Copy the frame infos first, to be overwritten by the frames,
	   if these are present.
	*/
	sh_css_frame_info_to_sp(&sh_css_sp_stage.frames.in.info,
				&binary->in_frame_info);
	sh_css_frame_info_to_sp(&sh_css_sp_stage.frames.out.info,
				&binary->out_frame_info);
	sh_css_frame_info_to_sp(&sh_css_sp_stage.frames.internal_frame_info,
				&binary->internal_frame_info);
	sh_css_sp_stage.dvs_envelope.width = binary->dvs_envelope.width;
	sh_css_sp_stage.dvs_envelope.height = binary->dvs_envelope.height;
	sh_css_sp_stage.isp_deci_log_factor = binary->deci_factor_log2;
	sh_css_sp_stage.isp_vf_downscale_bits = binary->vf_downscale_log2;

	sh_css_sp_stage.sp_enable_xnr = xnr;
	/* sh_css_sp_stage.uds.extra_vectors = info->uds.extra_vectors; */
	/* task 3340 */
	sh_css_sp_stage.xmem_bin_addr = info->xmem_addr;
	sh_css_sp_stage.xmem_map_addr = sh_css_params_ddr_address_map();
	sh_css_sp_stage.anr = low_light;
	sh_css_isp_stage.blob_info = *blob_info;
	sh_css_stage_write_binary_info((struct sh_css_binary_info *)info);
	/* binary_name is NUL-terminated; copy including the terminator */
	memcpy(sh_css_isp_stage.binary_name, binary_name,
	       strlen(binary_name)+1);
	memcpy(&sh_css_isp_stage.mem_interface, isp_mem_if,
	       sizeof(sh_css_isp_stage.mem_interface));
#if 0
	{
		struct sh_css_vector motion;
		struct sh_css_zoom zoom;
		sh_css_get_zoom(&zoom);
		sh_css_get_dis_motion(&motion);
		sh_css_update_uds_and_crop_info(binary->info,
			&binary->in_frame_info,
			&binary->out_frame_info,
			&binary->dvs_envelope,
			preview_mode,
			&zoom,
			&motion,
			&sh_css_sp_stage.uds,
			&sh_css_sp_stage.sp_out_crop_pos);
	}
#else
	/**
	 * Even when a stage does not need uds and does not params,
	 * sp_uds_init() seems to be called (needs further investigation)
	 * This function can not deal with dx, dy = {0, 0}
	 */
	(void)preview_mode;
#if 0
	sh_css_sp_stage.uds =
		(struct sh_css_uds_info){HRT_GDC_N, HRT_GDC_N, 0, 0};
#endif
#endif
	sh_css_params_set_current_binary(binary);

	/* Clean static frame info before we update it */
	/*
	 * TODO: Initialize the static frame data with
	 * "sh_css_frame_null".
	 */
	for (i = 0; i < SH_CSS_NUM_FRAME_IDS; i++)
		/* Here, we do not initialize it to zero for now */
		/* to be able to recognize non-updated elements */
		sh_css_sp_stage.frames.static_frame_data[i] = -1;

	err = sh_css_sp_write_frame_pointers(args, pipe_id, stage);
	if (err != sh_css_success)
		return err;

	if (continuous && binary->info->enable.raw_binning) {
		/* TODO: Remove this after preview output decimation is fixed
		 * by configuring out&vf info fiels properly */
		sh_css_sp_stage.frames.out.info.padded_width
			<<= binary->vf_downscale_log2;
		sh_css_sp_stage.frames.out.info.width
			<<= binary->vf_downscale_log2;
		sh_css_sp_stage.frames.out.info.height
			<<= binary->vf_downscale_log2;
	}
	store_sp_stage_data(pipe_id, stage);

	return sh_css_success;
}

/* Prepare one pipeline stage for the SP: resolve the binary (from the stage
 * itself or, for firmware stages, by filling a temporary binary from the
 * firmware's ISP info) and hand everything to sh_css_sp_init_stage().
 * NOTE(review): the sh_css_sp_init_stage() return value is discarded and
 * sh_css_success is returned unconditionally — looks like a dropped error;
 * confirm against callers. */
static enum sh_css_err
sp_init_stage(struct sh_css_pipeline_stage *stage,
	enum sh_css_pipe_id id,
	unsigned stage_num,
	bool preview_mode,
	bool low_light,
	bool xnr)
{
	struct sh_css_binary *binary = stage->binary;
	const struct sh_css_fw_info *firmware = stage->firmware;
	const struct sh_css_binary_args *args = &stage->args;
	const unsigned char *binary_name;
	const struct sh_css_binary_info *info = NULL;
	struct sh_css_binary tmp_binary;
	const struct sh_css_blob_info *blob_info;
	struct sh_css_hmm_isp_interface isp_mem_if[SH_CSS_NUM_ISP_MEMORIES];
	const struct sh_css_hmm_isp_interface *mem_if = isp_mem_if;
	enum sh_css_pipe_id pipe_id = id;

	memset(isp_mem_if, 0, sizeof(isp_mem_if));

	if (binary) {
		info = binary->info;
		binary_name = (const unsigned char *)(info->blob->name);
		blob_info = &info->blob->header.blob;
	} else {
		/* firmware (acceleration) stage: synthesize a binary */
		info = &firmware->info.isp;
		sh_css_fill_binary_info(info, false, false,
			SH_CSS_INPUT_FORMAT_RAW_10,
			args->in_frame ? &args->in_frame->info : NULL,
			args->out_frame ? &args->out_frame->info : NULL,
			args->out_vf_frame ? &args->out_vf_frame->info : NULL,
			&tmp_binary,
			false);
		binary = &tmp_binary;
		binary->info = info;
		binary_name = SH_CSS_EXT_ISP_PROG_NAME(firmware);
		blob_info = &firmware->blob;
		mem_if = firmware->memory_interface;
	}

	sh_css_dtrace(SH_DBG_TRACE, "sp_init_stage(): load binary: %s\n",
		binary_name);
#ifdef __KERNEL__
	printk(KERN_ERR "load binary: %s\n", binary_name);
#endif

	sh_css_sp_init_stage(binary,
		(const char *)binary_name,
		blob_info,
		args,
		pipe_id,
		stage_num,
		preview_mode,
		low_light,
		xnr,
		stage->irq_buf_flags,
		mem_if);
	return sh_css_success;
}

/* Initialize the SP-side representation of a whole pipeline: number the
 * stages, set up the pipeline group data (including the raw-copy path for
 * continuous preview/primary), configure parameter sampling, init every
 * stage via sp_init_stage(), and store the group data to SP DMEM.
 * NOTE(review): me->stages is dereferenced for first_binary/first_args
 * without a NULL check — an empty pipeline would crash; confirm callers
 * guarantee at least one stage. */
void
sh_css_sp_init_pipeline(struct sh_css_pipeline *me,
			enum sh_css_pipe_id id,
			bool preview_mode,
			bool low_light,
			bool xnr,
			bool two_ppc,
			bool continuous,
			bool offline,
			bool input_needs_raw_binning,
			enum sh_css_pipe_config_override copy_ovrd)
{
	/* Get first stage */
	struct sh_css_pipeline_stage *stage;
	struct sh_css_binary *first_binary = me->stages->binary;
	struct sh_css_binary_args *first_args = &me->stages->args;
	unsigned num;
	enum sh_css_pipe_id pipe_id = id;
	unsigned int thread_id;

	sh_css_query_sp_thread_id(pipe_id, &thread_id);
	memset(&sh_css_sp_group.pipe[thread_id], 0,
	       sizeof(struct sh_css_sp_pipeline));

	/* Count stages */
	for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
		stage->stage_num = num;
		sh_css_debug_pipe_graph_dump_stage(stage, id);
	}
	me->num_stages = num;

	if (first_binary != NULL) {
		/* Init pipeline data */
		sh_css_sp_init_group(two_ppc, first_binary->input_format,
				     offline);
		/* for Capture, do we need to add more modes like */
		if (continuous &&
		    (first_binary->info->mode == SH_CSS_BINARY_MODE_PREVIEW ||
		     first_binary->info->mode == SH_CSS_BINARY_MODE_PRIMARY)) {
#if 0
			sh_css_queue_buffer(SH_CSS_COPY_PIPELINE,
				SH_CSS_BUFFER_TYPE_OUTPUT_FRAME,
				(void *)first_args->cc_frame);
#endif
			sh_css_sp_start_raw_copy(first_binary,
				first_args->cc_frame, two_ppc,
				input_needs_raw_binning, copy_ovrd);
			sh_css_debug_pipe_graph_dump_sp_raw_copy(
				first_args->cc_frame);
		}
	} /* if (first_binary != NULL) */

	/* Init stage data */
	sh_css_init_host2sp_frame_data();

	sh_css_sp_group.pipe[thread_id].num_stages = 0;
	sh_css_sp_group.pipe[thread_id].pipe_id = pipe_id;
	/* TODO: next indicates from which queues parameters need to be
	   sampled, needs checking/improvement */
	if (sh_css_pipe_uses_params(me)) {
		sh_css_sp_group.pipe[thread_id].pipe_config =
			SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id;
	}
	/* For continuous use-cases, SP copy is responsible for sampling the
	 * parameters */
	if (sh_css_continuous_is_enabled())
		sh_css_sp_group.pipe[thread_id].pipe_config = 0;

	for (stage = me->stages, num = 0; stage; stage = stage->next, num++)
		sp_init_stage(stage, pipe_id, num,
			      preview_mode, low_light, xnr);

	store_sp_group_data();
}

/* Mark the SP-side pipeline for this pipe as empty (stage count zeroed);
 * the rest of the pipe struct is intentionally left intact (see the
 * commented-out memset). */
void
sh_css_sp_uninit_pipeline(enum sh_css_pipe_id pipe_id)
{
	unsigned int thread_id;
	sh_css_query_sp_thread_id(pipe_id, &thread_id);
	/*memset(&sh_css_sp_group.pipe[thread_id], 0,
		sizeof(struct sh_css_sp_pipeline));*/
	sh_css_sp_group.pipe[thread_id].num_stages = 0;
}