/*
 * Recognize the text contained in `surface` using the trained network held in
 * `charReg`, correcting each completed word against the dictionary file `dic`.
 *
 * Output format: words separated by spaces, one "\n" per detected line and an
 * extra "\n" after each block.  Returns a newly concatenated string; the
 * intermediate strings produced by string_concat are not freed here (matches
 * the allocation behavior of the rest of this module).
 *
 * NOTE(review): string_concat presumably allocates a fresh buffer each call —
 * confirm ownership; intermediate results leak if so.
 */
char *charRecognition_getText(struct charRecognition *charReg,
                              SDL_Surface *surface, char *dic)
{
    char *recognized = "";
    ImageBlockArray imageBlock = charDetection_blocks(surface);

    for (unsigned h = 0; h < imageBlock.size; h++) {
        ImageLineArray imageLine = imageBlock.elements[h].lines;

        for (unsigned i = 0; i < imageLine.size; i++) {
            char *curWord = "";

            for (unsigned j = 0; j < imageLine.elements[i].chars.size; j++) {
                struct ImageChar imageChar =
                    imageLine.elements[i].chars.elements[j];

                if (imageChar.space) {
                    /* Flush the pending word (if any) before the space. */
                    if (curWord[0] != '\0')
                        recognized = string_concat(
                            recognized, wordCorrector_correct(dic, curWord));
                    curWord = "";
                    recognized = string_concat(recognized, " ");
                    continue;
                }

                SDL_Surface *s = image_scale(
                    image_extractChar(surface, &imageChar), 16, 16);

                /* Run the network exactly once per glyph and reuse the
                 * result (the original evaluated it twice).  Cast to
                 * unsigned char before tolower() to avoid UB on negative
                 * char values. */
                char c = charRecognition_getChar(charReg, s);
                imageChar.content = c;
                curWord = string_concatChar(curWord,
                                            tolower((unsigned char)c));
            }

            /* Flush the last word of the line. */
            if (curWord[0] != '\0')
                recognized = string_concat(
                    recognized, wordCorrector_correct(dic, curWord));
            recognized = string_concat(recognized, "\n");
        }
        recognized = string_concat(recognized, "\n");
    }
    return recognized;
}
struct charRecognition *charRecognition_learn(char *rootPath, char chars[], size_t size, size_t variants) { struct charRecognition *charReg = malloc(sizeof(struct charRecognition)); struct NeuralNetwork *myNeuralNetwork = neuralNetwork_main(256, HIDDEN_LAYER_COUNT, size); unsignedArray2D input = new_unsignedArray2D(size * variants, 256); unsignedArray2D output = new_unsignedArray2D(size * variants, size); unsigned count = 0; for(unsigned i = 0; i < size; i++) { for(unsigned j = 0; j < variants; j++) { char toAscii[15]; sprintf(toAscii, "%d", (int)chars[i]); char *path = string_concat(rootPath, toAscii); path = string_concat(path, "/"); char filename[5]; sprintf(filename, "%d", j); path = string_concat(path, filename); path = string_concat(path, ".bmp"); SDL_Surface *s = image_scale( image_crop(image_load(path)), 16, 16); for (unsigned k = 0; k < 16; k++) for (unsigned l = 0; l < 16; l++) input.elements[count].elements[k + l * 16] = image_getPixelBool(s, k, l); for(unsigned k = 0; k < size; k++) output.elements[count].elements[k] = ((count / variants) == k); count++; } } NeuralNetwork_train(myNeuralNetwork, input, output, 0.05, 0.1, 0.9); charReg->letters = chars; charReg->size = size; charReg->network = myNeuralNetwork; return charReg; }
/*
 * Load a theme image from `path` and create a texture for it.
 *
 * Mipmapping is disabled, so the image is downscaled manually.  Heuristic:
 * the target size is roughly 1/16 of a full-screen texture, never smaller
 * than 32x32.  Returns the texture name, or 0 if the image fails to load.
 */
static GLuint theme_image(const char *path)
{
    int target_w, target_h;
    int img_w, img_h, img_b;
    void *pixels;
    GLuint texture = 0;

    image_near2(&target_w, &target_h, video.device_w, video.device_h);
    target_w = MAX(target_w / 16, 32);
    target_h = MAX(target_h / 16, 32);

    pixels = image_load(path, &img_w, &img_h, &img_b);

    if (pixels)
    {
        /* Prefer a small scale factor. */
        const int factor = MAX(img_w, img_h) / MAX(target_w, target_h);

        if (factor > 1)
        {
            void *scaled = image_scale(pixels, img_w, img_h, img_b,
                                       &img_w, &img_h, factor);
            if (scaled)
            {
                free(pixels);
                pixels = scaled;
            }
        }

        texture = make_texture(pixels, img_w, img_h, img_b, 0);

        free(pixels);
        pixels = NULL;
    }

    return texture;
}
/* ================= gl_texture_create ================= */

/*
 * Create a GL texture from `image`.
 *
 * flags    - GL_TEX_FL_* bits controlling dimension limits, picmip,
 *            scaling mode, mipmap generation and filtering.
 * gltex    - out: the generated texture name.
 * texw/texh- out: the texture-space dimensions callers should use for
 *            texcoord computation (see NOSCALE branch below).
 *
 * Returns true on success; on mipmap failure the texture is deleted and
 * false is returned.  All out-pointers must be non-NULL.
 */
erbool gl_texture_create (image_t *image, int flags, int *gltex, int *texw, int *texh)
{
    int max, sw, sh, mip;
    GLuint tex;

    if (NULL == image || NULL == gltex || NULL == texw || NULL == texh)
    {
        sys_printf("bad args (image=%p, flags=%i, gltex=%p, texw=%p, texh=%p)\n",
                   image, flags, gltex, texw, texh);
        return false;
    }

    /* Pick the dimension limit for the texture target. */
    if (flags & GL_TEX_FL_TEX3D)
    {
        max = gl_texture3d_size_max;
    }
    else if (flags & GL_TEX_FL_CUBEMAP)
    {
        max = gl_texture_cube_map_size_max;
    }
    else
    {
        max = gl_max_texture_size;
    }

    /* BUG FIX: the original tested the constant GL_TEX_FL_NOPICMIP itself
     * (always non-zero), so the picmip reduction below was unreachable.
     * Test the flag bit in `flags` instead. */
    if (flags & GL_TEX_FL_NOPICMIP)
    {
        sw = CLAMP(image->width, 1, max);
        sh = CLAMP(image->height, 1, max);
    }
    else
    {
        sw = CLAMP(image->width >> gl_picmip->i, 1, max);
        sh = CLAMP(image->height >> gl_picmip->i, 1, max);
    }

    /* Round up to powers of two unless NPOT textures are available. */
    if (!ext_gl_arb_texture_non_power_of_two ||
        !gl_arb_texture_non_power_of_two->i)
    {
        sw = ceil_pwrov2(sw);
        sh = ceil_pwrov2(sh);
    }

    sw = CLAMP(sw, GL_MIN_TEXTURE_DIMENSION, max);
    sh = CLAMP(sh, GL_MIN_TEXTURE_DIMENSION, max);

    if (flags & GL_TEX_FL_NOSCALE)
    {
        /* Canvas resize: texcoords address the padded sw x sh area. */
        *texw = sw;
        *texh = sh;

        if (!image_resize(image, sw, sh))
            return false;
    }
    else
    {
        /* Content scale: texcoords keep the original aspect. */
        *texw = image->width;
        *texh = image->height;

        if (!image_scale(image, sw, sh))
            return false;
    }

    glGenTextures(1, &tex);
    GLERROR();
    eglBindTexture(GL_TEXTURE_2D, tex);
    GLERROR();

    *gltex = tex;

    if (NULL != image->teximage2d)
    {
        /* Format-specific upload path provided by the image itself. */
        image->teximage2d(image);
    }
    else
    {
        if (ext_gl_sgis_generate_mipmap && gl_sgis_generate_mipmap->i)
        {
            /* Hardware mipmap generation. */
#ifdef ENGINE_OS_IPHONE
            glGenerateMipmapOES(GL_TEXTURE_2D);
#else
            glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
#endif
            GLERROR();
            GL_IMAGE_DATA2D(0, image);
            GLERROR();
        }
        else
        {
            /* Software mipmap chain: upload each level until 1x1. */
            GL_IMAGE_DATA2D(0, image);
            GLERROR();

            for (mip = 1; image->width > 1 || image->height > 1 ; mip++)
            {
                int status;

                if (0 > (status = image_mipmap(image)))
                {
                    sys_printf("mipmap failed\n");
                    goto error;
                }
                else if (status > 0)
                {
                    break;
                }

                GL_IMAGE_DATA2D(mip, image);
                GLERROR();
            }
        }
    }

    if (flags & GL_TEX_FL_NOFILTER)
    {
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        GLERROR();
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        GLERROR();
    }
    else
    {
        glTexParameteri(GL_TEXTURE_2D,
                        GL_TEXTURE_MIN_FILTER,
                        gl_trilinear->i ? GL_LINEAR_MIPMAP_LINEAR
                                        : GL_LINEAR_MIPMAP_NEAREST);
        GLERROR();
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        GLERROR();
    }

    if (!(flags & GL_TEX_FL_NOANISO) &&
        ext_gl_ext_texture_filter_anisotropic &&
        gl_ext_texture_filter_anisotropic->i)
    {
        GLfloat ani = CLAMP(gl_anisotropy_level->f, 1, gl_anisotropy_max);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, ani);
        GLERROR();
    }

    if (!(flags & GL_TEX_FL_NOLOD) &&
        ext_gl_ext_texture_lod_bias &&
        gl_ext_texture_lod_bias->i)
    {
        GLfloat lod = CLAMP(gl_lod_bias->f, 0, gl_lod_bias_max);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_LOD_BIAS_EXT, lod);
        GLERROR();
    }

    return true;

error:

    glDeleteTextures(1, &tex);
    GLERROR();

    return false;
}
/**
 * Display the currently selected file full-screen.
 *
 * jpeg file: updates pdvr->disp_bitmap for display;
 * video: a thumbnail frame is fetched and dispFlip() is called.
 *
 * Returns the decoder/scaler result code, or -1 on failure (bad pdvr,
 * missing URL, or unsupported/failed decode).  On failure the frame
 * buffer is reacquired and cleared before flipping so stale pixels are
 * not shown.
 */
int dvr_thmb_dispFullScreen(cdvr_thmb_t *pdvr, char *disp_pData)
{
    int ret = -1;
    SINT64 stime, etime;
    gp_bitmap_t *pbitmap = NULL;

    if (!pdvr) {
        return -1;
    }

    char *purl = NULL;
    int type = -1;

    dvr_thmb_setDispFullMode(pdvr);

    /* Acquire the destination frame buffer for the active display path. */
    if (DispBufManage.DispDev == C_DISP_BUFFER) {
        pdvr->upFrame = (unsigned char *)get_idle_buffer(0);
        clean_buffer(pdvr->upFrame, 0);
    } else {
        pdvr->upFrame = (char *)dispGetFramebuffer(pdvr->hDisp);
    }

    pdvr->pUrl = FilelistGetFilePath(pdvr->pFile, dvr_thmb_getCurIdxOfFile(pdvr));
    pdvr->fileType = checkFileType(pdvr->pUrl);
    printf("[%s:%d]curIdx %d curPage %d pageNum %d type %d\n", __FUNCTION__, __LINE__,
           pdvr->cur_idx, pdvr->cur_page, pdvr->cur_pageNum, pdvr->fileType);

    purl = pdvr->pUrl;
    type = pdvr->fileType;

    /* Release any previous photo session (for jpeg->video->jpeg cycles). */
    photo_uninit(pdvr->pPhotoInfo);
    pdvr->pPhotoInfo = NULL;

    if (purl) {
        stime = sysGetCurTime();
        ret = -1;

        if (type == GP_FILE_TYPE_VIDEO) {
            /* Decode the video's thumbnail frame straight into upFrame. */
            pbitmap = &pdvr->disp_bitmap;
            pbitmap->pData = (unsigned char *)pdvr->upFrame;
            ret = mcpGetThumbnail(pbitmap, purl, 0);
        } else if (type == GP_FILE_TYPE_JPEG) {
            pdvr->pPhotoInfo = photo_init(purl, &pdvr->disp_bitmap);
            if (pdvr->pPhotoInfo) {
                pdvr->pPhotoInfo->bitmap->pData = (unsigned char *)pdvr->upFrame;
                /* Decode + scale the jpeg into the frame buffer; a -1
                 * result falls through to the shared error path below
                 * (the original had an empty if(ret == -1) block here). */
                ret = image_scale(pdvr->pPhotoInfo);
            }
        }

        etime = sysGetCurTime();
        printf("thumbnail used time %lld - %lld = %lldms ret %d\n",
               etime, stime, etime - stime, ret);

        if (ret == -1) {
            /* Decode failed: reacquire and clear the frame buffer so a
             * blank screen is flipped instead of stale content. */
            //dispCleanFramebuffer(pdvr->hDisp);
            if (DispBufManage.DispDev == C_DISP_BUFFER) {
                pdvr->upFrame = (unsigned char *)get_idle_buffer(0);
                clean_buffer(pdvr->upFrame, 0);
            } else {
                pdvr->upFrame = (char *)dispGetFramebuffer(pdvr->hDisp);
                dispCleanFramebuffer(pdvr->hDisp);
            }
            printf("WARNNING: get fulscreen error!!!!!!!\n");
        }
    }

    dvr_thmb_dispFlip(pdvr);
    return ret;
}
/*
 * Lua binding: image:scale(sx, sy).
 *
 * Arg 1 is a "lualock.image" userdata; args 2 and 3 are the scale factors.
 * Uses luaL_checknumber instead of lua_tonumber so that a non-numeric
 * argument raises a descriptive Lua error rather than silently scaling
 * by 0.  Returns no values.
 */
static int lualock_lua_image_scale(lua_State *L)
{
    image_t *image = luaL_checkudata(L, 1, "lualock.image");
    image_scale(image, luaL_checknumber(L, 2), luaL_checknumber(L, 3));
    return 0;
}
/*
 * Resize `image` to an absolute width x height by converting the target
 * size into per-axis scale factors relative to the image's layer
 * dimensions and delegating to image_scale().
 */
void image_resize(image_t *image, gdouble width, gdouble height)
{
    const gdouble scale_x = width / (gdouble) image->layer->width;
    const gdouble scale_y = height / (gdouble) image->layer->height;

    image_scale(image, scale_x, scale_y);
}