/* Frame get_image callback for the avcolor_space filter.
 *
 * Fetches the image from the underlying producer, then — when the frame's
 * colorspace differs from the profile's target colorspace — round-trips
 * the YUV image through RGB24 so it can be re-encoded with the profile's
 * colorspace coefficients.
 *
 * Returns 0 on success, nonzero on error (propagated from the producer
 * or from convert_image).
 */
static int get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;
	// The profile was pushed onto the frame's get_image stack by the filter.
	mlt_profile profile = (mlt_profile) mlt_frame_pop_get_image( frame );
	mlt_properties properties = MLT_FRAME_PROPERTIES(frame);
	mlt_image_format format_from = *format;
	mlt_image_format format_to = mlt_image_rgb24;

	error = mlt_frame_get_image( frame, image, format, width, height, writable );

	int frame_colorspace = mlt_properties_get_int( properties, "colorspace" );

	// Only act on YUV 4:2:2 frames whose colorspace is known and valid
	// (> 0) and actually differs from the profile's.
	if ( !error && *format == mlt_image_yuv422 && profile->colorspace > 0 &&
	     frame_colorspace > 0 && frame_colorspace != profile->colorspace )
	{
		mlt_log_debug( NULL, "[filter avcolor_space] colorspace %d -> %d\n",
			frame_colorspace, profile->colorspace );

		// Convert to RGB using frame's colorspace
		error = convert_image( frame, image, &format_from, format_to );

		// Convert to YUV using profile's colorspace
		if ( !error )
		{
			// convert_image stores its result in the frame's "image"
			// property; re-fetch the data pointer after each conversion.
			*image = mlt_properties_get_data( properties, "image", NULL );
			format_from = mlt_image_rgb24;
			format_to = *format;
			// Tag the frame so the second conversion uses the profile's
			// colorspace for the RGB -> YUV step.
			mlt_properties_set_int( properties, "colorspace", profile->colorspace );
			error = convert_image( frame, image, &format_from, format_to );
			*image = mlt_properties_get_data( properties, "image", NULL );
		}
	}
	return error;
}
/// Read scanline y of depth-slice z into `data`, converting from the
/// file's native data format to `format` (or passing native data through
/// when `format` is TypeDesc::UNKNOWN), honoring the caller's xstride.
/// Returns true on success; false (with an error set) on failure.
bool
ImageInput::read_scanline (int y, int z, TypeDesc format, void *data,
                           stride_t xstride)
{
    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size();
    // native_data is true if the user asking for data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    if (native_data && xstride == AutoStride)
        xstride = native_pixel_bytes;
    else
        m_spec.auto_stride (xstride, format, m_spec.nchannels);
    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (native_data && xstride == native_pixel_bytes) ||
                      (!native_data && xstride == (stride_t)m_spec.pixel_bytes(false));

    // If user's format and strides are set up to accept the native data
    // layout, read the scanline directly into the user's buffer.
    if (native_data && contiguous)
        return read_native_scanline (y, z, data);

    // Complex case -- either changing data type or stride
    int scanline_values = m_spec.width * m_spec.nchannels;

    // NOTE(review): the native scanline is staged on the stack; a very
    // wide scanline could overflow it -- confirm acceptable here.
    unsigned char *buf = (unsigned char *) alloca (m_spec.scanline_bytes(true));
    bool ok = read_native_scanline (y, z, buf);
    if (! ok)
        return false;
    if (! perchanfile) {
        // No per-channel formats -- do the conversion in one shot
        ok = contiguous
            ? convert_types (m_spec.format, buf, format, data, scanline_values)
            : convert_image (m_spec.nchannels, m_spec.width, 1, 1, buf,
                             m_spec.format, AutoStride, AutoStride, AutoStride,
                             data, format, xstride, AutoStride, AutoStride);
    } else {
        // Per-channel formats -- have to convert/copy channels individually
        ASSERT (m_spec.channelformats.size() == (size_t)m_spec.nchannels);
        size_t offset = 0;
        for (int c = 0;  ok && c < m_spec.nchannels;  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            // Source: channel c within the interleaved native pixel (pixel
            // stride = whole native pixel size); destination: channel c
            // within the interleaved user pixel.
            ok = convert_image (1 /* channels */, m_spec.width, 1, 1,
                                buf+offset, chanformat,
                                native_pixel_bytes, AutoStride, AutoStride,
                                (char *)data + c*format.size(), format,
                                xstride, AutoStride, AutoStride);
            offset += chanformat.size ();
        }
    }
    if (! ok)
        error ("ImageInput::read_scanline : no support for format %s",
               m_spec.format.c_str());
    return ok;
}
bool ImageInput::read_tile (int x, int y, int z, TypeDesc format, void *data, stride_t xstride, stride_t ystride, stride_t zstride) { stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true); if (format == TypeDesc::UNKNOWN && xstride == AutoStride) xstride = native_pixel_bytes; m_spec.auto_stride (xstride, ystride, zstride, format, m_spec.nchannels, m_spec.tile_width, m_spec.tile_height); bool contiguous = (xstride == native_pixel_bytes && ystride == xstride*m_spec.tile_width && (zstride == ystride*m_spec.tile_height || zstride == 0)); // If user's format and strides are set up to accept the native data // layout, read the tile directly into the user's buffer. bool rightformat = (format == TypeDesc::UNKNOWN) || (format == m_spec.format && m_spec.channelformats.empty()); if (rightformat && contiguous) return read_native_tile (x, y, z, data); // Simple case // Complex case -- either changing data type or stride int tile_values = m_spec.tile_width * m_spec.tile_height * std::max(1,m_spec.tile_depth) * m_spec.nchannels; boost::scoped_array<char> buf (new char [m_spec.tile_bytes(true)]); bool ok = read_native_tile (x, y, z, &buf[0]); if (! ok) return false; if (m_spec.channelformats.empty()) { // No per-channel formats -- do the conversion in one shot ok = contiguous ? 
convert_types (m_spec.format, &buf[0], format, data, tile_values) : convert_image (m_spec.nchannels, m_spec.tile_width, m_spec.tile_height, m_spec.tile_depth, &buf[0], m_spec.format, AutoStride, AutoStride, AutoStride, data, format, xstride, ystride, zstride); } else { // Per-channel formats -- have to convert/copy channels individually size_t offset = 0; for (size_t c = 0; c < m_spec.channelformats.size(); ++c) { TypeDesc chanformat = m_spec.channelformats[c]; ok = convert_image (1 /* channels */, m_spec.tile_width, m_spec.tile_height, m_spec.tile_depth, &buf[offset], chanformat, native_pixel_bytes, AutoStride, AutoStride, (char *)data + c*m_spec.format.size(), format, xstride, AutoStride, AutoStride); offset += chanformat.size (); } } if (! ok) error ("ImageInput::read_tile : no support for format %s", m_spec.format.c_str()); return ok; }
bool write_image(struct mp_image *image, const struct image_writer_opts *opts, const char *filename, struct mp_log *log) { struct image_writer_opts defs = image_writer_opts_defaults; if (!opts) opts = &defs; const struct img_writer *writer = get_writer(opts); struct image_writer_ctx ctx = { log, opts, writer, image->fmt }; int destfmt = get_target_format(&ctx, image->imgfmt); struct mp_image *dst = convert_image(image, destfmt, log); if (!dst) return false; FILE *fp = fopen(filename, "wb"); bool success = false; if (fp == NULL) { mp_err(log, "Error opening '%s' for writing!\n", filename); } else { success = writer->write(&ctx, dst, fp); success = !fclose(fp) && success; if (!success) mp_err(log, "Error writing file '%s'!\n", filename); } talloc_free(dst); return success; }
// Convert one incoming scanline to UINT8 and deposit it into row y of
// the 4-byte-per-pixel frame canvas; z is unused (2D images only).
bool
GIFOutput::write_scanline(int y, int z, TypeDesc format, const void* data,
                          stride_t xstride)
{
    // Row y starts at y * width * 4 within the canvas.
    auto* dst = &m_canvas[y * spec().width * 4];
    const int nchans = spec().nchannels;
    const int width  = spec().width;
    return convert_image(nchans, width, 1 /*1 scanline*/, 1,
                         data, format, xstride, AutoStride, AutoStride,
                         dst, TypeDesc::UINT8, 4, AutoStride, AutoStride);
}
/* Build a 256-entry grayscale palette and run convert_image over a
 * 256x256x256 single-byte-per-voxel volume.
 * NOTE(review): the malloc'd buffer is passed to convert_image without
 * initialization -- confirm convert_image treats it as output or
 * tolerates arbitrary contents. */
int main() {
    byte *image = (byte *)malloc(1 << 24);  /* 256*256*256 bytes */
    if (!image)
        return 1;  /* BUGFIX: allocation failure was previously unchecked */

    byte palette[256][3] = {0};
    /* Grayscale ramp: R == G == B == index. */
    for (int i = 0; i < 256; ++i) {
        palette[i][0] = palette[i][1] = palette[i][2] = i;
    }

    convert_image(image, 256, 256, 256, palette);

    free(image);  /* BUGFIX: buffer was previously leaked */
    return 0;
}
// Show the given OpenCV image in the viewer, reset the zoom factor, and
// enable the actions that require a loaded image.
void ImageViewer::display_image(const cv::Mat mat)
{
    const QPixmap pixmap = QPixmap::fromImage(convert_image(mat));
    imageLabel->setPixmap(pixmap);

    scaleFactor = 1.0;

    // An image is now loaded: enable image-dependent actions.
    printAct->setEnabled(true);
    fitToWindowAct->setEnabled(true);
    updateActions();

    imageLabel->adjustSize();
}
/* Read packets from the video stream and decode until a frame at or past
 * desired_frame_number is found (or the first frame when it is -1). The
 * decoded frame is converted and returned through pkt; *got_frame is set
 * to 1 on success, 0 otherwise. For already-supported codecs the raw
 * packet is returned as-is. */
void decode_frame(State *state, AVPacket *pkt, int *got_frame, int64_t desired_frame_number) {
	// BUGFIX: allocate the scratch frame ONCE up front. The previous
	// version called avcodec_alloc_frame() inside the read loop, leaking
	// one frame per examined packet whenever several packets had to be
	// read before a usable frame appeared. (Mirrors the sibling
	// decode_frame overload.)
	AVFrame *frame = avcodec_alloc_frame();

	*got_frame = 0;

	if (!frame) {
		return;
	}

	// Read frames and return the first one found
	while (av_read_frame(state->pFormatCtx, pkt) >= 0) {

		// Is this a packet from the video stream?
		if (pkt->stream_index == state->video_stream) {
			int codec_id = state->video_st->codec->codec_id;

			// If the image isn't already in a supported format convert it to one
			if (!is_supported_format(codec_id)) {
				*got_frame = 0;

				// Decode video frame
				if (avcodec_decode_video2(state->video_st->codec, frame, got_frame, pkt) <= 0) {
					*got_frame = 0;
					break;
				}

				// Did we get a video frame?
				if (*got_frame) {
					if (desired_frame_number == -1 ||
						(desired_frame_number != -1 && frame->pkt_pts >= desired_frame_number)) {
						// Hand the decoded frame to convert_image, which
						// fills a fresh packet with the converted picture.
						AVPacket packet;
						av_init_packet(&packet);
						packet.data = NULL;
						packet.size = 0;
						convert_image(state->video_st->codec, frame, &packet, got_frame);
						*pkt = packet;
						break;
					}
				}
			} else {
				// Stream already in a supported format: return raw packet.
				*got_frame = 1;
				break;
			}
		}
	}

	// Free the frame
	av_free(frame);
}
/* Read packets from the video stream and decode until a frame at or past
 * desired_frame_number is found (or the first frame when it is -1). The
 * decoded frame is converted to width x height and returned through pkt;
 * *got_frame is set to 1 on success, 0 otherwise. For already-supported
 * codec/pixel-format combinations the raw packet is returned as-is. */
void decode_frame(State *state, AVPacket *pkt, int *got_frame, int64_t desired_frame_number, int width, int height) {
	// Allocate video frame
	AVFrame *frame = av_frame_alloc();

	*got_frame = 0;

	if (!frame) {
		return;
	}

	// Read frames and return the first one found
	while (av_read_frame(state->pFormatCtx, pkt) >= 0) {

		// Is this a packet from the video stream?
		if (pkt->stream_index == state->video_stream) {
			int codec_id = state->video_st->codec->codec_id;
			int pix_fmt = state->video_st->codec->pix_fmt;

			// If the image isn't already in a supported format convert it to one
			if (!is_supported_format(codec_id, pix_fmt)) {
				*got_frame = 0;

				// Decode video frame
				if (avcodec_decode_video2(state->video_st->codec, frame, got_frame, pkt) <= 0) {
					*got_frame = 0;
					break;
				}

				// Did we get a video frame?
				if (*got_frame) {
					if (desired_frame_number == -1 ||
						(desired_frame_number != -1 && frame->pkt_pts >= desired_frame_number)) {
						// Recycle pkt so convert_image can fill it with the
						// converted picture.
						if (pkt->data) {
							av_packet_unref(pkt);
						}
						av_init_packet(pkt);
						convert_image(state, state->video_st->codec, frame, pkt, got_frame, width, height);
						break;
					}
				}
			} else {
				// Stream already in a supported format: return raw packet.
				*got_frame = 1;
				break;
			}
		}
		// NOTE(review): packets from non-video streams are not unref'd
		// before the next av_read_frame -- confirm this does not leak.
	}

	// Free the frame
	av_frame_free(&frame);
}
//**************************************************************************** static void convert_collada_library_images( const char* colladapath, dae_COLLADA* collada, dae_library_images_type* daelibimage, taa_scene* scene, objmap* imagemap) { dae_image_type** daeimageitr = daelibimage->el_image.values; dae_image_type** daeimageend = daeimageitr+daelibimage->el_image.size; while(daeimageitr != daeimageend) { dae_image_type* daeimage = *daeimageitr; convert_image(colladapath, collada, daeimage, scene, imagemap); ++daeimageitr; } }
/* Render the current picture from the picture queue. The original SDL
 * YUV-overlay display path is disabled (kept below as commented-out
 * reference); instead the frame is handed to convert_image(), which
 * emits it as a packet.
 * NOTE(review): rect, w/h/x/y and aspect_ratio are computed but unused
 * now that the overlay path is disabled -- candidates for removal. */
void video_display(VideoState *is) {

  SDL_Rect rect;
  VideoPicture *vp;
  //AVPicture pict;
  float aspect_ratio;
  int w, h, x, y;
  //int i;

  vp = &is->pictq[is->pictq_rindex];
  if(vp->bmp) {
    if(is->video_st->codec->sample_aspect_ratio.num == 0) {
      // Unknown sample aspect ratio.
      aspect_ratio = 0;
    } else {
      aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
	is->video_st->codec->width / is->video_st->codec->height;
    }
    if(aspect_ratio <= 0.0) {
      // Fall back to the frame's own width/height ratio.
      aspect_ratio = (float)is->video_st->codec->width /
	(float)is->video_st->codec->height;
    }

    /*h = screen->h;
    w = ((int)rint(h * aspect_ratio)) & -3;
    if(w > screen->w) {
      w = screen->w;
      h = ((int)rint(w / aspect_ratio)) & -3;
    }
    x = (screen->w - w) / 2;
    y = (screen->h - h) / 2;

    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_DisplayYUVOverlay(vp->bmp, &rect);*/

    //displayBmp(&is->video_player, vp->bmp);

    // Convert the picture into a packet; -1/-1 keeps original dimensions
    // per the convert_image width/height convention used elsewhere --
    // TODO confirm.
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;

    int got_packet = 0;

    convert_image(is, is->video_st->codec, vp->bmp, &packet, &got_packet, -1, -1);

    //av_free(vp->bmp);
  }
}
/* Convert each <filename> <name> pair given on the command line. */
int main(int argc, char *argv[]) {
    pnm_init(&argc, argv);

    /* Arguments come in pairs after the program name, so argc must be
       odd and at least 3. */
    if (argc < 3 || !(argc % 2)) {
        fprintf(stderr, "Usage: %s <filename> <name> ...\n", argv[0]);
        exit(1);
    }

    /* Consume one <filename> <name> pair per iteration. */
    for (; argc >= 2; argc -= 2, argv += 2)
        convert_image(argv[1], argv[2]);

    exit(0);
}
// Decode a PNG from an in-memory buffer into an mp_image converted to
// imgfmt. Returns NULL on any failure (no decoder, allocation failure,
// decode error, or conversion failure).
struct mp_image *load_image_png_buf(void *buffer, size_t buffer_size, int imgfmt)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_PNG);
    if (!codec)
        return NULL;

    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;

    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }

    // Copy the caller's buffer into a packet; if allocation fails, pkt
    // stays empty/NULL and decoding below simply yields no frame.
    AVPacket *pkt = av_packet_alloc();
    if (pkt) {
        if (av_new_packet(pkt, buffer_size) >= 0)
            memcpy(pkt->data, buffer, buffer_size);
    }

    // (There is only 1 outcome: either it takes it and decodes it, or not.)
    avcodec_send_packet(avctx, pkt);
    avcodec_send_packet(avctx, NULL); // NULL packet signals end-of-stream
    av_packet_free(&pkt);

    struct mp_image *res = NULL;
    AVFrame *frame = av_frame_alloc();
    if (frame && avcodec_receive_frame(avctx, frame) >= 0) {
        struct mp_image *r = mp_image_from_av_frame(frame);
        if (r)
            res = convert_image(r, imgfmt, mp_null_log);
        // convert_image returns a new reference (or NULL), so the
        // intermediate image is always released here.
        talloc_free(r);
    }
    av_frame_free(&frame);
    avcodec_free_context(&avctx);
    return res;
}
OIIO_NAMESPACE_BEGIN

/// Copy an OpenCV IplImage into `dst`, optionally converting the pixel
/// data type to `convert` (UNKNOWN keeps the source type). Returns false
/// (with an error set on dst) for NULL input, unsupported depth/order,
/// or when OpenCV support is compiled out.
bool
ImageBufAlgo::from_IplImage (ImageBuf &dst, const IplImage *ipl,
                             TypeDesc convert)
{
    if (! ipl) {
        DASSERT (0 && "ImageBufAlgo::fromIplImage called with NULL ipl");
        dst.error ("Passed NULL source IplImage");
        return false;
    }
#ifdef USE_OPENCV
    // Map the IplImage depth code to an OIIO TypeDesc.
    TypeDesc srcformat;
    switch (ipl->depth) {
    case int(IPL_DEPTH_8U) :
        srcformat = TypeDesc::UINT8;  break;
    case int(IPL_DEPTH_8S) :
        srcformat = TypeDesc::INT8;  break;
    case int(IPL_DEPTH_16U) :
        srcformat = TypeDesc::UINT16;  break;
    case int(IPL_DEPTH_16S) :
        srcformat = TypeDesc::INT16;  break;
    case int(IPL_DEPTH_32F) :
        srcformat = TypeDesc::FLOAT;  break;
    case int(IPL_DEPTH_64F) :
        srcformat = TypeDesc::DOUBLE;  break;
    default:
        DASSERT (0 && "unknown IplImage type");
        dst.error ("Unsupported IplImage depth %d", (int)ipl->depth);
        return false;
    }
    TypeDesc dstformat = (convert != TypeDesc::UNKNOWN) ? convert : srcformat;
    ImageSpec spec (ipl->width, ipl->height, ipl->nChannels, dstformat);
    // N.B. The OpenCV headers say that ipl->alphaChannel,
    // ipl->colorModel, and ipl->channelSeq are ignored by OpenCV.

    if (ipl->dataOrder != IPL_DATA_ORDER_PIXEL) {
        // We don't handle separate color channels, and OpenCV doesn't either
        dst.error ("Unsupported IplImage data order %d", (int)ipl->dataOrder);
        return false;
    }

    dst.reset (dst.name(), spec);
    size_t pixelsize = srcformat.size()*spec.nchannels;
    // Account for the origin in the line step size, to end up with the
    // standard OIIO origin-at-upper-left.
    // BUGFIX: use the signed stride type -- the negated widthStep was
    // previously stored in an unsigned size_t before being narrowed back
    // to the (signed) stride parameter, which is implementation-defined.
    // NOTE(review): for origin != 0 the source base pointer is still
    // ipl->imageData (first row) -- confirm a bottom-up source should not
    // instead start at the last row when the stride is negative.
    stride_t linestep = ipl->origin ? -ipl->widthStep : ipl->widthStep;
    // Block copy and convert
    convert_image (spec.nchannels, spec.width, spec.height, 1,
                   ipl->imageData, srcformat,
                   pixelsize, linestep, 0,
                   dst.pixeladdr(0,0), dstformat,
                   spec.pixel_bytes(), spec.scanline_bytes(), 0);
    // FIXME - honor dataOrder.  I'm not sure if it is ever used by
    // OpenCV.  Fix when it becomes a problem.

    // OpenCV uses BGR ordering
    // FIXME: what do they do with alpha?
    if (spec.nchannels >= 3) {
        // Swap R and B in place to get OIIO's RGB ordering.
        float pixel[4];
        for (int y = 0;  y < spec.height;  ++y) {
            for (int x = 0;  x < spec.width;  ++x) {
                dst.getpixel (x, y, pixel, 4);
                float tmp = pixel[0];
                pixel[0] = pixel[2];
                pixel[2] = tmp;
                dst.setpixel (x, y, pixel, 4);
            }
        }
    }
    // FIXME -- the copy and channel swap should happen all as one loop,
    // probably templated by type.

    return true;
#else
    dst.error ("fromIplImage not supported -- no OpenCV support at compile time");
    return false;
#endif
}
/// Create a new IplImage holding a converted copy of `src`. HALF data is
/// widened to FLOAT (OpenCV has no half type). Channels are reordered to
/// OpenCV's BGR/BGRA. Returns NULL on failure or when OpenCV support is
/// compiled out; the caller owns the returned IplImage.
IplImage *
ImageBufAlgo::to_IplImage (const ImageBuf &src)
{
#ifdef USE_OPENCV
    ImageBuf tmp = src;
    ImageSpec spec = tmp.spec();

    // Make sure the image buffer is initialized.
    if (!tmp.initialized() && !tmp.read(tmp.subimage(), tmp.miplevel(), true)) {
        DASSERT (0 && "Could not initialize ImageBuf.");
        return NULL;
    }

    // Map the OIIO data type to an IplImage depth code; dstSpecFormat is
    // the type the pixel data will actually be converted to.
    int dstFormat;
    TypeDesc dstSpecFormat;
    if (spec.format == TypeDesc(TypeDesc::UINT8)) {
        dstFormat = IPL_DEPTH_8U;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::INT8)) {
        dstFormat = IPL_DEPTH_8S;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::UINT16)) {
        dstFormat = IPL_DEPTH_16U;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::INT16)) {
        dstFormat = IPL_DEPTH_16S;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::HALF)) {
        dstFormat = IPL_DEPTH_32F;
        // OpenCV does not support half types. Switch to float instead.
        dstSpecFormat = TypeDesc(TypeDesc::FLOAT);
    } else if (spec.format == TypeDesc(TypeDesc::FLOAT)) {
        dstFormat = IPL_DEPTH_32F;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::DOUBLE)) {
        dstFormat = IPL_DEPTH_64F;
        dstSpecFormat = spec.format;
    } else {
        DASSERT (0 && "Unknown data format in ImageBuf.");
        return NULL;
    }
    IplImage *ipl = cvCreateImage(cvSize(spec.width, spec.height), dstFormat, spec.nchannels);
    if (!ipl) {
        DASSERT (0 && "Unable to create IplImage.");
        return NULL;
    }

    size_t pixelsize = dstSpecFormat.size() * spec.nchannels;
    // Account for the origin in the line step size, to end up with the
    // standard OIIO origin-at-upper-left.
    // BUGFIX: use the signed stride type -- the negated widthStep was
    // previously stored in an unsigned size_t before being narrowed back
    // to the (signed) stride parameter, which is implementation-defined.
    stride_t linestep = ipl->origin ? -ipl->widthStep : ipl->widthStep;

    bool converted = convert_image(spec.nchannels, spec.width, spec.height, 1,
                                   tmp.localpixels(), spec.format,
                                   spec.pixel_bytes(), spec.scanline_bytes(), 0,
                                   ipl->imageData, dstSpecFormat,
                                   pixelsize, linestep, 0);

    if (!converted) {
        DASSERT (0 && "convert_image failed.");
        cvReleaseImage(&ipl);
        return NULL;
    }

    // OpenCV uses BGR ordering
    if (spec.nchannels == 3) {
        cvCvtColor(ipl, ipl, CV_RGB2BGR);
    } else if (spec.nchannels == 4) {
        cvCvtColor(ipl, ipl, CV_RGBA2BGRA);
    }

    return ipl;
#else
    return NULL;
#endif
}
/// Convert a user-supplied rectangle of pixel data (possibly strided,
/// possibly in a non-native data type) into the file's native layout.
/// Returns a pointer either to the caller's own data (when it is already
/// native and contiguous) or into `scratch`, which is resized as needed
/// and must outlive the returned pointer. Returns NULL on misuse
/// (per-channel formats given to a writer that doesn't support them).
const void *
ImageOutput::to_native_rectangle (int xbegin, int xend, int ybegin, int yend,
                                  int zbegin, int zend,
                                  TypeDesc format, const void *data,
                                  stride_t xstride, stride_t ystride,
                                  stride_t zstride,
                                  std::vector<unsigned char> &scratch)
{
    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format, if specified when the file was opened.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size() && supports("channelformats");
    // It's an error to pass per-channel data formats to a writer that
    // doesn't support it.
    if (m_spec.channelformats.size() && !perchanfile)
        return NULL;
    // native_data is true if the user is passing data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    // If the user is passing native data and they've left xstride set
    // to Auto, then we know it's the native pixel size.
    if (native_data && xstride == AutoStride)
        xstride = native_pixel_bytes;
    // Fill in the rest of the strides that haven't been set.
    m_spec.auto_stride (xstride, ystride, zstride, format,
                        m_spec.nchannels, xend-xbegin, yend-ybegin);

    // Compute width and height from the rectangle extents
    int width = xend - xbegin;
    int height = yend - ybegin;
    int depth = zend - zbegin;

    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (xstride == (stride_t)m_spec.pixel_bytes(native_data));
    contiguous &= ((ystride == xstride*width || height == 1) &&
                   (zstride == ystride*height || depth == 1));

    if (native_data && contiguous) {
        // Data are already in the native format and contiguous
        // just return a ptr to the original data.
        return data;
    }

    imagesize_t rectangle_pixels = width * height * depth;
    imagesize_t rectangle_values = rectangle_pixels * m_spec.nchannels;
    imagesize_t rectangle_bytes = rectangle_pixels * native_pixel_bytes;

    // Cases to handle:
    // 1. File has per-channel data, user passes native data -- this has
    //    already returned above, since the data didn't need munging.
    // 2. File has per-channel data, user passes some other data type
    // 3. File has uniform data, user passes some other data type
    // 4. File has uniform data, user passes the right data -- note that
    //    this case already returned if the user data was contiguous

    // Handle the per-channel format case (#2) where the user is passing
    // a non-native buffer.
    if (perchanfile) {
        if (native_data) {
            ASSERT (contiguous && "Per-channel native output requires contiguous strides");
        }
        ASSERT (format != TypeDesc::UNKNOWN);
        ASSERT (m_spec.channelformats.size() == (size_t)m_spec.nchannels);
        scratch.resize (rectangle_bytes);
        // Convert one channel at a time: channel c lives at offset
        // c*format.size() in each user pixel and at `offset` in each
        // native pixel.
        size_t offset = 0;
        for (int c = 0;  c < m_spec.nchannels;  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            convert_image (1 /* channels */, width, height, depth,
                           (char *)data + c*format.size(), format,
                           xstride, ystride, zstride,
                           &scratch[offset], chanformat,
                           native_pixel_bytes, AutoStride, AutoStride,
                           NULL,
                           c == m_spec.alpha_channel ? 0 : -1,
                           c == m_spec.z_channel ? 0 : -1);
            offset += chanformat.size ();
        }
        return &scratch[0];
    }

    // The remaining code is where all channels in the file have the
    // same data type, which may or may not be what the user passed in
    // (cases #3 and #4 above).

    // Scratch layout: [contiguized copy][float intermediate][native output]
    imagesize_t contiguoussize = contiguous ? 0
                                            : rectangle_values * native_pixel_bytes;
    contiguoussize = (contiguoussize+3) & (~3); // Round up to 4-byte boundary
    DASSERT ((contiguoussize & 3) == 0);
    imagesize_t floatsize = rectangle_values * sizeof(float);
    scratch.resize (contiguoussize + floatsize + rectangle_bytes);

    // Force contiguity if not already present
    if (! contiguous) {
        data = contiguize (data, m_spec.nchannels, xstride, ystride, zstride,
                           (void *)&scratch[0], width, height, depth, format);
    }

    // Rather than implement the entire cross-product of possible
    // conversions, use float as an intermediate format, which generally
    // will always preserve enough precision.
    const float *buf;
    if (format == TypeDesc::FLOAT) {
        // Already in float format -- leave it as-is.
        buf = (float *)data;
    } else {
        // Convert to from 'format' to float.
        buf = convert_to_float (data, (float *)&scratch[contiguoussize],
                                rectangle_values, format);
    }
    // Convert from float to native format.
    return convert_from_float (buf, &scratch[contiguoussize+floatsize],
                               rectangle_values, m_spec.quant_black,
                               m_spec.quant_white, m_spec.quant_min,
                               m_spec.quant_max, m_spec.format);
}
int main(int argc, char** argv) { for(int i = 1; i < argc; i++) { if(!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help")) { usage(argv[0]); exit(2); } else if(!strcmp(argv[i], "-r")) { if(argc >= ++i) { ruleset_name = std::string(argv[i]); } else { fprintf(stderr, "Error: no parameter to -r.\n"); usage(argv[0]); exit(2); } } else if(!strcmp(argv[i], "-x")) { wrap_x = false; } else { if(!infile) infile = argv[i]; else if(!color_mapping_file) color_mapping_file = argv[i]; else if(!outfile) outfile = argv[i]; else { fprintf(stderr, "Unknown option '%s'.\n", argv[i]); usage(argv[0]); exit(2); } } } if(!infile || !outfile || !color_mapping_file) { usage(argv[0]); exit(1); } bool succ = false; if(sdl_init_all()) exit(1); try { convert_image(); succ = true; } catch (boost::archive::archive_exception& e) { printf("boost::archive::archive_exception: %s (code %d).\n", e.what(), e.code); } catch (std::exception& e) { printf("std::exception: %s\n", e.what()); } catch(...) { printf("Unknown exception.\n"); } TTF_Quit(); SDL_Quit(); return succ ? 0 : 1; }
const void * ImageOutput::to_native_rectangle (int xmin, int xmax, int ymin, int ymax, int zmin, int zmax, TypeDesc format, const void *data, stride_t xstride, stride_t ystride, stride_t zstride, std::vector<unsigned char> &scratch) { stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true); if (format == TypeDesc::UNKNOWN && xstride == AutoStride) xstride = native_pixel_bytes; m_spec.auto_stride (xstride, ystride, zstride, format, m_spec.nchannels, xmax-xmin+1, ymax-ymin+1); // Compute width and height from the rectangle extents int width = xmax - xmin + 1; int height = ymax - ymin + 1; int depth = zmax - zmin + 1; // Do the strides indicate that the data are already contiguous? bool contiguous = (xstride == native_pixel_bytes && (ystride == xstride*width || height == 1) && (zstride == ystride*height || depth == 1)); // Does the user already have the data in the right format? bool rightformat = (format == TypeDesc::UNKNOWN) || (format == m_spec.format && m_spec.channelformats.empty()); if (rightformat && contiguous) { // Data are already in the native format and contiguous // just return a ptr to the original data. return data; } imagesize_t rectangle_pixels = width * height * depth; imagesize_t rectangle_values = rectangle_pixels * m_spec.nchannels; imagesize_t rectangle_bytes = rectangle_pixels * native_pixel_bytes; // Handle the per-channel format case if (m_spec.channelformats.size() && supports("channelformats")) { ASSERT (contiguous && "Per-channel output requires contiguous strides"); ASSERT (format != TypeDesc::UNKNOWN); scratch.resize (rectangle_bytes); size_t offset = 0; for (int c = 0; c < (int)m_spec.channelformats.size(); ++c) { TypeDesc chanformat = m_spec.channelformats[c]; convert_image (1 /* channels */, width, height, depth, (char *)data + c*m_spec.format.size(), format, xstride, ystride, zstride, &scratch[offset], chanformat, native_pixel_bytes, AutoStride, AutoStride, NULL, c == m_spec.alpha_channel ? 0 : -1, c == m_spec.z_channel ? 
0 : -1); offset = chanformat.size (); } return &scratch[0]; } imagesize_t contiguoussize = contiguous ? 0 : rectangle_values * native_pixel_bytes; contiguoussize = (contiguoussize+3) & (~3); // Round up to 4-byte boundary DASSERT ((contiguoussize & 3) == 0); imagesize_t floatsize = rectangle_values * sizeof(float); scratch.resize (contiguoussize + floatsize + rectangle_bytes); // Force contiguity if not already present if (! contiguous) { data = contiguize (data, m_spec.nchannels, xstride, ystride, zstride, (void *)&scratch[0], width, height, depth, format); } // Rather than implement the entire cross-product of possible // conversions, use float as an intermediate format, which generally // will always preserve enough precision. const float *buf; if (format == TypeDesc::FLOAT) { // Already in float format -- leave it as-is. buf = (float *)data; } else { // Convert to from 'format' to float. buf = convert_to_float (data, (float *)&scratch[contiguoussize], rectangle_values, format); } // Convert from float to native format. return convert_from_float (buf, &scratch[contiguoussize+floatsize], rectangle_values, m_spec.quant_black, m_spec.quant_white, m_spec.quant_min, m_spec.quant_max, m_spec.format); }
/* Return a new reference to `image` converted to destfmt at display size.
 * Non-RGB targets are normalized to what FFmpeg's JPEG encoder expects
 * (full-range BT.601, centered chroma). Returns a fresh reference to the
 * input when no conversion is needed, or NULL on failure. */
struct mp_image *convert_image(struct mp_image *image, int destfmt,
                               struct mp_log *log)
{
    int d_w, d_h;
    mp_image_params_get_dsize(&image->params, &d_w, &d_h);

    struct mp_image_params p = {
        .imgfmt = destfmt,
        .w = d_w,
        .h = d_h,
        .p_w = 1,
        .p_h = 1,
    };
    mp_image_params_guess_csp(&p);

    // If RGB, just assume everything is correct.
    if (p.color.space != MP_CSP_RGB) {
        // Currently, assume what FFmpeg's jpg encoder needs.
        // Of course this works only for non-HDR (no HDR support in libswscale).
        p.color.levels = MP_CSP_LEVELS_PC;
        p.color.space = MP_CSP_BT_601;
        p.chroma_location = MP_CHROMA_CENTER;
        mp_image_params_guess_csp(&p);
    }

    if (mp_image_params_equal(&p, &image->params))
        return mp_image_new_ref(image);

    struct mp_image *dst = mp_image_alloc(p.imgfmt, p.w, p.h);
    if (!dst) {
        mp_err(log, "Out of memory.\n");
        return NULL;
    }
    mp_image_copy_attributes(dst, image);
    dst->params = p;

    if (mp_image_swscale(dst, image, mp_sws_hq_flags) < 0) {
        mp_err(log, "Error when converting image.\n");
        talloc_free(dst);
        return NULL;
    }

    return dst;
}

/* Convert `image` to the selected writer's format and write it to
 * `filename`; the JPEG writer is used for MJPEG when available,
 * otherwise the generic lavc writer. Returns true only if both writing
 * and closing succeed. */
bool write_image(struct mp_image *image, const struct image_writer_opts *opts,
                 const char *filename, struct mp_log *log)
{
    struct image_writer_opts defs = image_writer_opts_defaults;
    if (!opts)
        opts = &defs;

    struct image_writer_ctx ctx = { log, opts, image->fmt };
    bool (*write)(struct image_writer_ctx *, mp_image_t *, FILE *) = write_lavc;
    int destfmt = 0;

#if HAVE_JPEG
    if (opts->format == AV_CODEC_ID_MJPEG) {
        write = write_jpeg;
        destfmt = IMGFMT_RGB24;
    }
#endif

    if (!destfmt)
        destfmt = get_target_format(&ctx);

    struct mp_image *dst = convert_image(image, destfmt, log);
    if (!dst)
        return false;

    FILE *fp = fopen(filename, "wb");
    bool success = false;
    if (fp == NULL) {
        mp_err(log, "Error opening '%s' for writing!\n", filename);
    } else {
        success = write(&ctx, dst, fp);
        // fclose can still fail (flush errors) -- fold it into the result.
        success = !fclose(fp) && success;
        if (!success)
            mp_err(log, "Error writing file '%s'!\n", filename);
    }

    talloc_free(dst);
    return success;
}

/* Convenience wrapper: write `image` to `filename` as a PNG. */
void dump_png(struct mp_image *image, const char *filename, struct mp_log *log)
{
    struct image_writer_opts opts = image_writer_opts_defaults;
    opts.format = AV_CODEC_ID_PNG;
    write_image(image, &opts, filename, log);
}
/// Read the tile whose upper-left corner is (x,y,z) into `data`,
/// converting from the file's native format to `format` (or passing
/// native data through when `format` is TypeDesc::UNKNOWN), honoring the
/// caller's strides. The coordinates must name a tile corner. Returns
/// true on success.
bool
ImageInput::read_tile (int x, int y, int z, TypeDesc format, void *data,
                       stride_t xstride, stride_t ystride, stride_t zstride)
{
    // Reject untiled files and coordinates that are not a tile corner.
    if (! m_spec.tile_width ||
        ((x-m_spec.x) % m_spec.tile_width) != 0 ||
        ((y-m_spec.y) % m_spec.tile_height) != 0 ||
        ((z-m_spec.z) % m_spec.tile_depth) != 0)
        return false;   // coordinates are not a tile corner

    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size();
    // native_data is true if the user asking for data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    if (format == TypeDesc::UNKNOWN && xstride == AutoStride)
        xstride = native_pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format, m_spec.nchannels,
                        m_spec.tile_width, m_spec.tile_height);
    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (native_data && xstride == native_pixel_bytes) ||
                      (!native_data && xstride == (stride_t)m_spec.pixel_bytes(false));
    contiguous &= (ystride == xstride*m_spec.tile_width &&
                   (zstride == ystride*m_spec.tile_height || zstride == 0));

    // If user's format and strides are set up to accept the native data
    // layout, read the tile directly into the user's buffer.
    if (native_data && contiguous)
        return read_native_tile (x, y, z, data);  // Simple case

    // Complex case -- either changing data type or stride
    size_t tile_values = (size_t)m_spec.tile_pixels() * m_spec.nchannels;

    std::vector<char> buf (m_spec.tile_bytes(true));
    bool ok = read_native_tile (x, y, z, &buf[0]);
    if (! ok)
        return false;
    if (! perchanfile) {
        // No per-channel formats -- do the conversion in one shot
        ok = contiguous
            ? convert_types (m_spec.format, &buf[0], format, data, tile_values)
            : convert_image (m_spec.nchannels, m_spec.tile_width,
                             m_spec.tile_height, m_spec.tile_depth,
                             &buf[0], m_spec.format, AutoStride,
                             AutoStride, AutoStride,
                             data, format, xstride, ystride, zstride);
    } else {
        // Per-channel formats -- have to convert/copy channels individually
        if (native_data) {
            ASSERT (contiguous && "Per-channel native input requires contiguous strides");
        }
        ASSERT (format != TypeDesc::UNKNOWN);
        ASSERT (m_spec.channelformats.size() == (size_t)m_spec.nchannels);
        size_t offset = 0;
        for (int c = 0;  c < m_spec.nchannels;  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            // Source: channel c within the interleaved native pixel;
            // destination: channel c within the interleaved user pixel.
            ok = convert_image (1 /* channels */, m_spec.tile_width,
                                m_spec.tile_height, m_spec.tile_depth,
                                &buf[offset], chanformat,
                                native_pixel_bytes, AutoStride, AutoStride,
                                (char *)data + c*format.size(),
                                format, xstride, AutoStride, AutoStride);
            offset += chanformat.size ();
        }
    }

    if (! ok)
        error ("ImageInput::read_tile : no support for format %s",
               m_spec.format.c_str());
    return ok;
}
bool ImageInput::read_scanlines (int ybegin, int yend, int z, int firstchan, int nchans, TypeDesc format, void *data, stride_t xstride, stride_t ystride) { nchans = std::min (nchans, m_spec.nchannels-firstchan); yend = std::min (yend, spec().y+spec().height); size_t native_pixel_bytes = m_spec.pixel_bytes (firstchan, nchans, true); imagesize_t native_scanline_bytes = clamped_mult64 ((imagesize_t)m_spec.width, (imagesize_t)native_pixel_bytes); bool native = (format == TypeDesc::UNKNOWN); size_t pixel_bytes = native ? native_pixel_bytes : format.size()*nchans; if (native && xstride == AutoStride) xstride = pixel_bytes; stride_t zstride = AutoStride; m_spec.auto_stride (xstride, ystride, zstride, format, nchans, m_spec.width, m_spec.height); bool contiguous = (xstride == (stride_t) native_pixel_bytes && ystride == (stride_t) native_scanline_bytes); // If user's format and strides are set up to accept the native data // layout, read the scanlines directly into the user's buffer. bool rightformat = (format == TypeDesc::UNKNOWN) || (format == m_spec.format && m_spec.channelformats.empty()); if (rightformat && contiguous) { if (firstchan == 0 && nchans == m_spec.nchannels) return read_native_scanlines (ybegin, yend, z, data); else return read_native_scanlines (ybegin, yend, z, firstchan, nchans, data); } // No such luck. Read scanlines in chunks. const imagesize_t limit = 16*1024*1024; // Allocate 16 MB, or 1 scanline int chunk = std::max (1, int(limit / native_scanline_bytes)); std::vector<unsigned char> buf (chunk * native_scanline_bytes); bool ok = true; int scanline_values = m_spec.width * nchans; for (; ok && ybegin < yend; ybegin += chunk) { int y1 = std::min (ybegin+chunk, yend); ok &= read_native_scanlines (ybegin, y1, z, firstchan, nchans, &buf[0]); if (! 
ok) break; int nscanlines = y1 - ybegin; int chunkvalues = scanline_values * nscanlines; if (m_spec.channelformats.empty()) { // No per-channel formats -- do the conversion in one shot if (contiguous) { ok = convert_types (m_spec.format, &buf[0], format, data, chunkvalues); } else { ok = convert_image (nchans, m_spec.width, nscanlines, 1, &buf[0], m_spec.format, AutoStride, AutoStride, AutoStride, data, format, xstride, ystride, zstride); } } else { // Per-channel formats -- have to convert/copy channels individually size_t offset = 0; for (int c = 0; ok && c < nchans; ++c) { TypeDesc chanformat = m_spec.channelformats[c+firstchan]; ok = convert_image (1 /* channels */, m_spec.width, nscanlines, 1, &buf[offset], chanformat, pixel_bytes, AutoStride, AutoStride, (char *)data + c*m_spec.format.size(), format, xstride, ystride, zstride); offset += chanformat.size (); } } if (! ok) error ("ImageInput::read_scanlines : no support for format %s", m_spec.format.c_str()); data = (char *)data + ystride*nscanlines; } return ok; }
/*
 * Extract the first attached picture (album art) from the media file and
 * return it in *pkt.  If the picture's codec is not a supported output
 * format, it is decoded and re-encoded via convert_image().
 * Returns SUCCESS if a packet was produced, FAILURE otherwise.
 */
int get_embedded_picture(State **ps, AVPacket *pkt) {
	printf("get_embedded_picture\n");
	unsigned int i = 0;  /* nb_streams is unsigned -- avoid signed/unsigned mix */
	int got_packet = 0;
	AVFrame *frame = NULL;

	State *state = *ps;

	if (!state || !state->pFormatCtx) {
		return FAILURE;
	}

	// read the format headers
	if (state->pFormatCtx->iformat->read_header(state->pFormatCtx) < 0) {
		printf("Could not read the format header\n");
		return FAILURE;
	}

	// find the first attached picture, if available
	for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
		if (state->pFormatCtx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
			printf("Found album art\n");
			/* NOTE(review): shallow copy -- pkt aliases the demuxer-owned
			 * attached_pic buffer; callers must not free pkt->data. */
			*pkt = state->pFormatCtx->streams[i]->attached_pic;

			// Is this a packet from the video stream?
			if (pkt->stream_index == state->video_stream) {
				int codec_id = state->video_st->codec->codec_id;

				// If the image isn't already in a supported format convert it to one
				if (!is_supported_format(codec_id)) {
					int got_frame = 0;

					/* resets optional fields only; data/size are preserved */
					av_init_packet(pkt);

					frame = av_frame_alloc();

					if (!frame) {
						break;
					}

					if (avcodec_decode_video2(state->video_st->codec, frame, &got_frame, pkt) <= 0) {
						break;
					}

					// Did we get a video frame?
					if (got_frame) {
						AVPacket packet;
						av_init_packet(&packet);
						packet.data = NULL;
						packet.size = 0;
						/* re-encode the decoded frame into a supported format */
						convert_image(state->video_st->codec, frame, &packet, &got_packet, -1, -1);
						*pkt = packet;
						break;
					}
				} else {
					/* already supported -- hand back the attached picture */
					av_init_packet(pkt);
					pkt->data = state->pFormatCtx->streams[i]->attached_pic.data;
					pkt->size = state->pFormatCtx->streams[i]->attached_pic.size;

					got_packet = 1;
					break;
				}
			}
		}
	}

	/* FIX: was av_free(frame), which frees only the AVFrame struct and
	 * leaks the frame's data planes; av_frame_free releases both and is
	 * safe on NULL. */
	av_frame_free(&frame);

	if (got_packet) {
		return SUCCESS;
	} else {
		return FAILURE;
	}
}
/*
 * Extract the first attached picture (album art) from the media file into
 * *pkt as an independent (deep-copied) packet.  If the picture's codec or
 * pixel format is unsupported, it is decoded and re-encoded via
 * convert_image().  Returns SUCCESS if a packet was produced, FAILURE
 * otherwise (including when pkt is NULL).
 */
int get_embedded_picture(State **ps, AVPacket *pkt) {
	printf("get_embedded_picture\n");
	unsigned int i = 0;  /* nb_streams is unsigned -- avoid signed/unsigned mix */
	int got_packet = 0;
	AVFrame *frame = NULL;

	State *state = *ps;

	/* FIX: also reject a NULL pkt up front -- previously only the
	 * unref/init pair was guarded while av_copy_packet(pkt, ...) below
	 * dereferenced pkt unconditionally. */
	if (!state || !state->pFormatCtx || !pkt) {
		return FAILURE;
	}

	// TODO commented out 5/31/16, do we actully need this since the context
	// has been initialized
	// read the format headers
	/*if (state->pFormatCtx->iformat->read_header(state->pFormatCtx) < 0) {
		printf("Could not read the format header\n");
		return FAILURE;
	}*/

	// find the first attached picture, if available
	for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
		if (state->pFormatCtx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
			printf("Found album art\n");

			av_packet_unref(pkt);
			av_init_packet(pkt);
			/* deep copy so pkt no longer aliases the demuxer-owned buffer;
			 * FIX: only claim success when the copy actually succeeded
			 * (got_packet was unconditionally set to 1 before). */
			if (av_copy_packet(pkt, &state->pFormatCtx->streams[i]->attached_pic) < 0) {
				break;
			}
			got_packet = 1;

			// Is this a packet from the video stream?
			if (pkt->stream_index == state->video_stream) {
				int codec_id = state->video_st->codec->codec_id;
				int pix_fmt = state->video_st->codec->pix_fmt;

				// If the image isn't already in a supported format convert it to one
				if (!is_supported_format(codec_id, pix_fmt)) {
					int got_frame = 0;

					frame = av_frame_alloc();

					if (!frame) {
						break;
					}

					if (avcodec_decode_video2(state->video_st->codec, frame, &got_frame, pkt) <= 0) {
						break;
					}

					// Did we get a video frame?
					if (got_frame) {
						AVPacket convertedPkt;
						av_init_packet(&convertedPkt);
						convertedPkt.size = 0;
						convertedPkt.data = NULL;

						/* re-encode the decoded frame into a supported format */
						convert_image(state, state->video_st->codec, frame, &convertedPkt, &got_packet, -1, -1);

						/* replace pkt's contents with the converted picture */
						av_packet_unref(pkt);
						av_init_packet(pkt);
						av_copy_packet(pkt, &convertedPkt);

						av_packet_unref(&convertedPkt);
						break;
					}
				} else {
					/* already supported -- hand back a fresh copy of the
					 * attached picture */
					av_packet_unref(pkt);
					av_init_packet(pkt);
					av_copy_packet(pkt, &state->pFormatCtx->streams[i]->attached_pic);

					got_packet = 1;
					break;
				}
			}
		}
	}

	/* frees the frame and its data planes; safe on NULL */
	av_frame_free(&frame);

	if (got_packet) {
		return SUCCESS;
	} else {
		return FAILURE;
	}
}