Example #1
int
audio_open (int dev, struct fileinfo *file)
{
  int             ret;
  int             bits;
  int             dev_type = dev & 0x0f;
  int             mode = file->mode & O_ACCMODE;

  dev = dev >> 4;

  if (dev_type == SND_DEV_DSP16)
    bits = 16;
  else
    bits = 8;

  if ((ret = DMAbuf_open (dev, mode)) < 0)
    return ret;

  if (audio_devs[dev]->coproc)
    if ((ret = audio_devs[dev]->coproc->
	 open (audio_devs[dev]->coproc->devc, COPR_PCM)) < 0)
      {
	audio_release (dev, file);
	printk ("Sound: Can't access coprocessor device\n");

	return ret;
      }

  local_conversion[dev] = 0;

  if (DMAbuf_ioctl (dev, SNDCTL_DSP_SETFMT, (caddr_t) bits, 1) != bits)
    {
      printk ("audio: Can't set number of bits on device %d\n", dev);
      audio_release (dev, file);
      return -(ENXIO);
    }

  if (dev_type == SND_DEV_AUDIO)
    {
      set_format (dev, AFMT_MU_LAW);
    }
  else
    set_format (dev, bits);

  audio_mode[dev] = AM_NONE;
  dev_nblock[dev] = 0;

  return ret;
}
Example #2
    //--------------------------------------------------------------------------
    int CLI::set_format(const std::string& f)
    {
        if (f == "Text" || f == "text")
            return set_format(MediaConchLib::format_Text);
        if (f == "XML" || f == "xml")
            return set_format(MediaConchLib::format_Xml);
        if (f == "MAXML" || f == "maxml")
            return set_format(MediaConchLib::format_MaXml);
        if (f == "JSTREE" || f == "jstree")
            return set_format(MediaConchLib::format_JsTree);
        if (f == "HTML" || f == "html")
            return set_format(MediaConchLib::format_Html);

        return -1;
    }
Example #3
File: rdpsnd_main.c Project: alama/freerdp
static int
thread_process_message_wave_info(rdpsndPlugin * plugin, char * data, int data_size)
{
	int wFormatNo;
	int error;

	if (plugin->device_plugin)
		error = plugin->device_plugin->open(plugin->device_plugin);
	else
		error = 1;

	plugin->wTimeStamp = GET_UINT16(data, 0); /* time in ms */
	plugin->local_time_stamp = get_mstime(); /* time in ms */
	wFormatNo = GET_UINT16(data, 2);
	LLOGLN(10, ("thread_process_message_wave_info: data_size %d "
		"wFormatNo %d", data_size, wFormatNo));
	plugin->cBlockNo = GET_UINT8(data, 4);
	plugin->waveDataSize = data_size - 8;
	memcpy(plugin->waveData, data + 8, 4);
	if (wFormatNo != plugin->current_format && !error)
	{
		plugin->current_format = wFormatNo;
		set_format(plugin);
	}
	plugin->expectingWave = 1;
	return error;
}
Example #4
File: devenum.c Project: reactos/reactos
static HRESULT load_driver_devices(EDataFlow flow)
{
    WCHAR **ids;
    GUID *guids;
    UINT num, def, i;
    HRESULT hr;

    if(!drvs.pGetEndpointIDs)
        return S_OK;

    hr = drvs.pGetEndpointIDs(flow, &ids, &guids, &num, &def);
    if(FAILED(hr))
        return hr;

    for(i = 0; i < num; ++i){
        MMDevice *dev;
        dev = MMDevice_Create(ids[i], &guids[i], flow, DEVICE_STATE_ACTIVE,
                def == i);
        set_format(dev);
    }

    HeapFree(GetProcessHeap(), 0, guids);
    HeapFree(GetProcessHeap(), 0, ids);

    return S_OK;
}
Example #5
void LogManager::initDefaultConsoleLogger() {

    default_console_appender_->setName(WIND_LOG_TEXT("default_console_appender"));
    set_format(default_console_appender_, default_console_format_);
    default_console_logger_.addAppender(default_console_appender_);
    default_console_logger_.setLogLevel(LogLevel::Trace);
}
Example #6
void init_device()
{
	set_format();
	request_buffer();
	query_buf_and_mmap();
	queue_buffer();
}
Example #7
    std::shared_ptr<stream_profile_interface> software_sensor::add_video_stream(rs2_video_stream video_stream)
    {
        auto exist = (std::find_if(_profiles.begin(), _profiles.end(), [&](std::shared_ptr<stream_profile_interface> profile)
        {
            if (profile->get_unique_id() == video_stream.uid)
            {
                return true;
            }
            return false;
        } ) != _profiles.end());

        if (exist)
        {
            LOG_WARNING("Stream unique ID already exist!");
            throw rs2::error("Stream unique ID already exist!");
        }

        auto profile = std::make_shared<video_stream_profile>(
            platform::stream_profile{ (uint32_t)video_stream.width, (uint32_t)video_stream.height, (uint32_t)video_stream.fps, 0 });
        profile->set_dims(video_stream.width, video_stream.height);
        profile->set_format(video_stream.fmt);
        profile->set_framerate(video_stream.fps);
        profile->set_stream_index(video_stream.index);
        profile->set_stream_type(video_stream.type);
        profile->set_unique_id(video_stream.uid);
        profile->set_intrinsics([=]() {return video_stream.intrinsics; });
        _profiles.push_back(profile);

        return profile;
    }
Example #8
/// Initializes the empty object. \p operation is the function that returned the error code.
libc_exception::libc_exception (const char* operation) throw()
: exception(),
  m_Errno (errno),
  m_Operation (operation)
{
    set_format (xfmt_LibcException);
}
Example #9
static void
set_layout(GnmStyle * style,const psiconv_sheet_cell_layout psi_layout)
{
	GnmColor *color;

	set_format(style,psi_layout->numberformat);
	gnm_style_set_font_size(style,psi_layout->character->font_size);
	gnm_style_set_font_italic(style,psi_layout->character->italic?TRUE:FALSE);
	gnm_style_set_font_bold(style,psi_layout->character->bold?TRUE:FALSE);
	gnm_style_set_font_uline(style,
	                      psi_layout->character->underline?TRUE:FALSE);
	gnm_style_set_font_strike(style,
	                       psi_layout->character->strikethrough?TRUE:FALSE);
	gnm_style_set_font_name(style,
			     (const char *) psi_layout->character->font->name);
	color = get_color(psi_layout->character->color);
	if (color)
		gnm_style_set_font_color (style, color);
	/* TODO: Character level layouts: super_sub */
	/* TODO: Paragraph level layouts: all */
	/* TODO: Background color: add transparent if white */
#if 0
	color = get_color(psi_layout->paragraph->back_color);
	if (color) {
		gnm_style_set_back_color(style, color);
		gnm_style_set_pattern_color(style, color);
		/* TODO: Replace 24 with some symbol */
		gnm_style_set_pattern(style,1);
	}
#endif
}
Example #10
File: mesh.cpp Project: ezhangle/fplbase
Mesh::Mesh(const void *vertex_data, int count, int vertex_size,
           const Attribute *format, vec3 *max_position, vec3 *min_position)
    : vertex_size_(vertex_size),
      bone_transforms_(nullptr),
      bone_global_transforms_(nullptr) {
  set_format(format);
  GL_CALL(glGenBuffers(1, &vbo_));
  GL_CALL(glBindBuffer(GL_ARRAY_BUFFER, vbo_));
  GL_CALL(glBufferData(GL_ARRAY_BUFFER, count * vertex_size, vertex_data,
                       GL_STATIC_DRAW));

  // Determine the min and max position
  if (max_position && min_position) {
    max_position_ = *max_position;
    min_position_ = *min_position;
  } else {
    auto data = static_cast<const float *>(vertex_data);
    const Attribute *attribute = format;
    data += VertexSize(attribute, kPosition3f) / sizeof(float);
    int step = vertex_size / sizeof(float);
    min_position_ = vec3(data);
    max_position_ = min_position_;
    for (int vertex = 1; vertex < count; vertex++) {
      data += step;
      min_position_ = vec3::Min(min_position_, vec3(data));
      max_position_ = vec3::Max(max_position_, vec3(data));
    }
  }
}
Example #11
    std::shared_ptr<stream_profile_interface> software_sensor::add_motion_stream(rs2_motion_stream motion_stream)
    {
        auto exist = (std::find_if(_profiles.begin(), _profiles.end(), [&](std::shared_ptr<stream_profile_interface> profile)
        {
            return profile->get_unique_id() == motion_stream.uid;
        }) != _profiles.end());

        if (exist)
        {
            LOG_WARNING("Motion stream unique ID already exist!");
            throw rs2::error("Stream unique ID already exist!");
        }

        auto profile = std::make_shared<motion_stream_profile>(
            platform::stream_profile{ 0, 0, (uint32_t)motion_stream.fps, 0 });
        profile->set_format(motion_stream.fmt);
        profile->set_framerate(motion_stream.fps);
        profile->set_stream_index(motion_stream.index);
        profile->set_stream_type(motion_stream.type);
        profile->set_unique_id(motion_stream.uid);
        profile->set_intrinsics([=]() {return motion_stream.intrinsics; });
        _profiles.push_back(profile);

        return profile;
    }
Example #12
static void open_audio_out()
{
  int fd;
  int flags = O_WRONLY;
  if( nonblock )
  {
    flags |= O_NONBLOCK;
  }
  fd = open("/dev/dsp", flags, 0777);
  if( fd < 0 )
  {
    perror("open dsp");
  }
  else
  {
    int setting, result;

    ioctl(fd, SNDCTL_DSP_RESET);
    setting = 0x00040009;
    result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &setting);
    if( result )
    {
      perror("ioctl(SNDCTL_DSP_SETFRAGMENT)");
    }
    set_format(fd, 0x10, 2, rate);

    fdw = fd;
  }
}
Example #13
int
main (int argc,
      char *argv[])
{
   unicap_handle_t handle;
   unicap_format_t format;
   
   /* Initialize */
   gtk_init (&argc, &argv);
   g_thread_init(NULL);
   gdk_threads_init ();
   gtk_gl_init (&argc, &argv);
   init_extensions ();
   init_gl_resources ();

   handle = open_device();
   set_format( handle );
   unicap_get_format( handle, &format );
   if( ( format.size.width != 640 ) ||
       ( format.size.height != 480 ) )
   {
      g_warning( "The default .cg file assumes a video format of 640x480 pixels. \nYou need to change the yuv.cg file to match your size.\n" );
   }
   
   prepare_yuv_buffer(&yuvbuffer, &format);
   unicap_register_callback( handle, UNICAP_EVENT_NEW_FRAME, (unicap_callback_t)new_frame_cb, NULL ); 
   unicap_start_capture( handle );
   

   /* Gtk window & container */
   GtkWindow *window = GTK_WINDOW (gtk_window_new (GTK_WINDOW_TOPLEVEL));
   glarea = gtk_drawing_area_new ();
   gtk_widget_set_size_request (GTK_WIDGET (glarea), WINDOW_WIDTH, WINDOW_HEIGHT);
   g_signal_connect (glarea, "expose-event", G_CALLBACK (on_expose), NULL);
   g_signal_connect (glarea, "configure-event", G_CALLBACK (on_configure), NULL);
   g_signal_connect (glarea, "map-event", G_CALLBACK (on_mapped), NULL);
   g_signal_connect (window, "delete-event", G_CALLBACK (gtk_main_quit), NULL);

   GdkGLConfig *gl_config;
   gl_config = gdk_gl_config_new_by_mode (GDK_GL_MODE_RGBA | GDK_GL_MODE_DOUBLE);

   if (gl_config == NULL) 
      g_critical ("Failed to setup a double-buffered RGB visual");

   if (! gtk_widget_set_gl_capability (GTK_WIDGET (glarea), 
				       gl_config,
				       NULL,
				       TRUE,
				       GDK_GL_RGBA_TYPE))
      g_critical ("Failed to add gl capability");

   gtk_container_add (GTK_CONTAINER (window), GTK_WIDGET (glarea));
   gtk_widget_show_all (GTK_WIDGET (window));


   /* Main loop */
   gtk_main ();
   return 0;
}
Example #14
static int
sscape_audio_ioctl(int dev, u_int cmd, ioctl_arg arg, int local)
{
    sscape_info    *devc = (sscape_info *) audio_devs[dev]->devc;

    switch (cmd) {
    case SOUND_PCM_WRITE_RATE:
	if (local)
	    return set_speed(devc, (int) arg);
	return *(int *) arg = set_speed(devc, (*(int *) arg));

    case SOUND_PCM_READ_RATE:
	if (local)
	    return 8000;
	return *(int *) arg = 8000;

    case SNDCTL_DSP_STEREO:
	if (local)
	    return set_channels(devc, (int) arg + 1) - 1;
	return *(int *) arg = set_channels(devc, (*(int *) arg) + 1) - 1;

    case SOUND_PCM_WRITE_CHANNELS:
	if (local)
	    return set_channels(devc, (int) arg);
	return *(int *) arg = set_channels(devc, (*(int *) arg));

    case SOUND_PCM_READ_CHANNELS:
	if (local)
	    return 1;
	return *(int *) arg = 1;

    case SNDCTL_DSP_SAMPLESIZE:
	if (local)
	    return set_format(devc, (int) arg);
	return *(int *) arg = set_format(devc, (*(int *) arg));

    case SOUND_PCM_READ_BITS:
	if (local)
	    return 8;
	return *(int *) arg = 8;

    default:;
    }
    return -(EINVAL);
}
Example #15
void number_formatter_platform_data_t::monitor_locale(const dictionary_t&)
{
    auto_locale_t current_locale(::CFLocaleCopyCurrent());
    std::string   num_format_save(get_format());

    formatter_m.reset(::CFNumberFormatterCreate(NULL, current_locale.get(), kCFNumberFormatterNoStyle));

    set_format(num_format_save);
}
Example #16
File: mint.c Project: Deslon/Supernova
static int reset_parameters(out123_handle *ao)
{
	int ret;
	ret = ioctl(ao->fn,AIOCRESET,NULL);
	if(ret >= 0) ret = set_format(ao);
	if(ret >= 0) ret = set_channels(ao);
	if(ret >= 0) ret = set_rate(ao);
	return ret;
}
Example #17
/// Initializes the empty object. \p operation is the function that returned the error code.
stream_bounds_exception::stream_bounds_exception (const char* operation, const char* type, uoff_t offset, size_t expected, size_t remaining) throw()
: libc_exception (operation),
  m_TypeName (type),
  m_Offset (offset),
  m_Expected (expected),
  m_Remaining (remaining)
{
    set_format (xfmt_StreamBoundsException);
}
Example #18
File: ad_mpg123.c Project: AbhiDGamer/mpv
static int decode_packet(struct dec_audio *da)
{
    struct ad_mpg123_context *con = da->priv;
    int ret;

    mp_audio_set_null_data(&da->decoded);

    struct demux_packet *pkt;
    if (demux_read_packet_async(da->header, &pkt) == 0)
        return AD_WAIT;
    if (!pkt)
        return AD_EOF;

    /* Next bytes from that presentation time. */
    if (pkt->pts != MP_NOPTS_VALUE) {
        da->pts        = pkt->pts;
        da->pts_offset = 0;
    }

    /* Have to use mpg123_feed() to avoid decoding here. */
    ret = mpg123_feed(con->handle, pkt->buffer, pkt->len);
    talloc_free(pkt);

    if (ret != MPG123_OK)
        goto mpg123_fail;

    unsigned char *audio = NULL;
    size_t bytes = 0;
    ret = mpg123_decode_frame(con->handle, NULL, &audio, &bytes);

    if (ret == MPG123_NEED_MORE)
        return 0;

    if (ret != MPG123_OK && ret != MPG123_DONE && ret != MPG123_NEW_FORMAT)
        goto mpg123_fail;

    ret = set_format(da);
    if (ret != MPG123_OK)
        goto mpg123_fail;

    if (con->sample_size < 1) {
        MP_ERR(da, "no sample size\n");
        return AD_ERR;
    }

    int got_samples = bytes / con->sample_size;
    da->decoded.planes[0] = audio;
    da->decoded.samples = got_samples;

    update_info(da);
    return 0;

mpg123_fail:
    MP_ERR(da, "mpg123 decoding error: %s\n", mpg123_strerror(con->handle));
    return AD_ERR;
}
Example #19
File: formatspec.cpp Project: nerd93/oiio
ImageSpec::ImageSpec (TypeDesc format)
    : x(0), y(0), z(0), width(0), height(0), depth(1),
      full_x(0), full_y(0), full_z(0),
      full_width(0), full_height(0), full_depth(0),
      tile_width(0), tile_height(0), tile_depth(1),
      nchannels(0), format(format), alpha_channel(-1), z_channel(-1),
      deep(false)
{
    set_format (format);
}
Example #20
/// Initializes the empty object. \p operation is the function that returned the error code.
file_exception::file_exception (const char* operation, const char* filename) throw()
: libc_exception (operation)
{
    memset (m_Filename, 0, VectorSize(m_Filename));
    set_format (xfmt_FileException);
    if (filename) {
	strncpy (m_Filename, filename, VectorSize(m_Filename));
	m_Filename [VectorSize(m_Filename) - 1] = 0;
    }
}
Example #21
void generator_node_t::do_calc_format( const render::context_t& context)
{
    image::format_t f( get_value<image::format_t>( param( "format")));
    Imath::Box2i area( f.area());
    --area.max.x;
    --area.max.y;
    set_format( area);
    set_aspect_ratio( f.aspect);
    set_proxy_scale( Imath::V2f( 1, 1));
}
Example #22
File: ao_wasapi.c Project: wrl/mpv
static int try_format(struct wasapi_state *state,
                      struct ao *const ao,
                      int bits, int samplerate,
                      const struct mp_chmap channels)
{
    WAVEFORMATEXTENSIBLE wformat;
    set_format(&wformat, bits / 8, samplerate, channels.num, mp_chmap_to_waveext(&channels));

    int af_format = format_set_bits(ao->format, bits, bits == 32);

    EnterCriticalSection(&state->print_lock);
    mp_msg(MSGT_AO, MSGL_V, "ao-wasapi: trying %dch %s @ %dhz\n",
           channels.num, af_fmt_to_str(af_format), samplerate);
    LeaveCriticalSection(&state->print_lock);

    union WAVEFMT u;
    u.extensible = &wformat;

    WAVEFORMATEX *closestMatch;
    HRESULT hr = IAudioClient_IsFormatSupported(state->pAudioClient,
                                                state->share_mode,
                                                u.ex, &closestMatch);

    if (closestMatch) {
        if (closestMatch->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
            u.ex = closestMatch;
            wformat = *u.extensible;
        } else {
            wformat.Format = *closestMatch;
        }

        CoTaskMemFree(closestMatch);
    }

    if (hr == S_FALSE) {
        if (set_ao_format(state, ao, wformat)) {
            EnterCriticalSection(&state->print_lock);
            mp_msg(MSGT_AO, MSGL_V, "ao-wasapi: accepted as %dch %s @ %dhz\n",
                ao->channels.num, af_fmt_to_str(ao->format), ao->samplerate);
            LeaveCriticalSection(&state->print_lock);

            return 1;
        }
    }
    if (hr == S_OK || (!state->opt_exclusive && hr == AUDCLNT_E_UNSUPPORTED_FORMAT)) {
        // AUDCLNT_E_UNSUPPORTED_FORMAT here means "works in shared, doesn't in exclusive"
        if (set_ao_format(state, ao, wformat)) {
            EnterCriticalSection(&state->print_lock);
            mp_msg(MSGT_AO, MSGL_V, "ao-wasapi: %dch %s @ %dhz accepted\n",
                   ao->channels.num, af_fmt_to_str(af_format), samplerate);
            LeaveCriticalSection(&state->print_lock);
            return 1;
        }
    }
    return 0;
}
Example #23
File: formatspec.cpp Project: nerd93/oiio
ImageSpec::ImageSpec (int xres, int yres, int nchans, TypeDesc format)
    : x(0), y(0), z(0), width(xres), height(yres), depth(1),
      full_x(0), full_y(0), full_z(0),
      full_width(xres), full_height(yres), full_depth(1),
      tile_width(0), tile_height(0), tile_depth(1),
      nchannels(nchans), format(format), alpha_channel(-1), z_channel(-1),
      deep(false)
{
    set_format (format);
    default_channel_names ();
}
Example #24
/**
* Sends the JTAG state machine to the idle state
*/
static void set_JTAG_to_idle(void)
{
	/* Go to idle...*/
	transitions("0111110",TDI_0);

	if (!in_normal_scan_mode()) {
		/* Go back to 2wire mode.  We've destroyed things in RTI.*/
		set_format(normal);
		set_JTAG_to_idle();
		enter_2wire_mode();
	}
}
Example #25
/**
*  Enters oscan1 mode.
*/
static void enter_oscan1(void)
{
	/* Enter the command STFMT MODE
	   where :
	   parameter STFMT    = 5'h03;  Store Format
	   parameter TAP7_OSCAN1 = 5'h9; */
	send_2part_command(STFMT,OSCAN1);
	/* After the command, must send a check packet */
	check_packet();
	/* Now in oscan1.  Port must know this. */
	set_format(oscan1);
}
Example #26
void load_videoin_config(void)
{
	set_format(config_read_int("vin_format", VIDEO_FORMAT_CVBS6));

	set_value(CONTROL_BRIGHTNESS, config_read_int("vin_brightness", 0));
	set_value(CONTROL_CONTRAST, config_read_int("vin_contrast", 0x80));
	set_value(CONTROL_HUE, config_read_int("vin_hue", 0));

	mtk_cmdf(appid, "s_brightness.set(-value %d)", brightness);
	mtk_cmdf(appid, "s_contrast.set(-value %d)", contrast);
	mtk_cmdf(appid, "s_hue.set(-value %d)", hue);
}
Example #27
void expand_node_t::do_calc_format( const render::context_t& context)
{
	image_node_t *in = input_as<image_node_t>();
    Imath::Box2i box( in->format());	
    int exp_t = get_absolute_value<float>( param( "top"));
    int exp_l = get_absolute_value<float>( param( "left"));
    int exp_b = get_absolute_value<float>( param( "bottom"));
    int exp_r = get_absolute_value<float>( param( "right"));
    set_format( Imath::Box2i( Imath::V2i( box.min.x - exp_l, box.min.y - exp_t), Imath::V2i( box.max.x + exp_r, box.max.y + exp_b)));
	set_aspect_ratio( in->aspect_ratio());
	set_proxy_scale( in->proxy_scale());
}
Example #28
File: ad_mpg123.c Project: Aseeker/mpv
static int decode_audio(struct dec_audio *da, struct mp_audio *buffer, int maxlen)
{
    struct ad_mpg123_context *con = da->priv;
    void *buf = buffer->planes[0];
    int ret;

    if (con->new_format) {
        ret = set_format(da);
        if (ret == MPG123_OK) {
            return 0; // let caller handle format change
        } else if (ret == MPG123_NEED_MORE) {
            con->need_data = true;
        } else {
            goto mpg123_fail;
        }
    }

    if (con->need_data) {
        if (feed_new_packet(da) < 0)
            return -1;
    }

    if (!mp_audio_config_equals(&da->decoded, buffer))
        return 0;

    size_t got_now = 0;
    ret = mpg123_replace_buffer(con->handle, buf, maxlen * con->sample_size);
    if (ret != MPG123_OK)
        goto mpg123_fail;

    ret = mpg123_decode_frame(con->handle, NULL, NULL, &got_now);

    int got_samples = got_now / con->sample_size;
    buffer->samples += got_samples;
    da->pts_offset += got_samples;

    if (ret == MPG123_NEW_FORMAT) {
        con->new_format = true;
    } else if (ret == MPG123_NEED_MORE) {
        con->need_data = true;
    } else if (ret != MPG123_OK && ret != MPG123_DONE) {
        goto mpg123_fail;
    }

    update_info(da);
    return 0;

mpg123_fail:
    MP_ERR(da, "mpg123 decoding error: %s\n",
           mpg123_strerror(con->handle));
    return -1;
}
Example #29
File: omap1_camera.c Project: 020gzh/linux
static int omap1_cam_set_fmt(struct soc_camera_device *icd,
			      struct v4l2_format *f)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	const struct soc_camera_format_xlate *xlate;
	struct device *dev = icd->parent;
	struct soc_camera_host *ici = to_soc_camera_host(dev);
	struct omap1_cam_dev *pcdev = ici->priv;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_mbus_framefmt *mf = &format.format;
	int ret;

	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
	if (!xlate) {
		dev_warn(dev, "%s: format %#x not found\n", __func__,
				pix->pixelformat);
		return -EINVAL;
	}

	mf->width	= pix->width;
	mf->height	= pix->height;
	mf->field	= pix->field;
	mf->colorspace	= pix->colorspace;
	mf->code	= xlate->code;

	ret = dma_align(&mf->width, &mf->height, xlate->host_fmt, pcdev->vb_mode,
			true);
	if (ret < 0) {
		dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
				__func__, pix->width, pix->height,
				xlate->host_fmt->name);
		return ret;
	}

	ret = set_format(pcdev, dev, icd, sd, &format, xlate);
	if (ret < 0) {
		dev_err(dev, "%s: failed to set format\n", __func__);
		return ret;
	}

	pix->width	 = mf->width;
	pix->height	 = mf->height;
	pix->field	 = mf->field;
	pix->colorspace  = mf->colorspace;
	icd->current_fmt = xlate;

	return 0;
}
Example #30
File: omap1_camera.c Project: 020gzh/linux
static int omap1_cam_set_crop(struct soc_camera_device *icd,
			       const struct v4l2_crop *crop)
{
	const struct v4l2_rect *rect = &crop->c;
	const struct soc_camera_format_xlate *xlate = icd->current_fmt;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->parent;
	struct soc_camera_host *ici = to_soc_camera_host(dev);
	struct omap1_cam_dev *pcdev = ici->priv;
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_mbus_framefmt *mf = &fmt.format;
	int ret;

	ret = subdev_call_with_sense(pcdev, dev, icd, sd, video, s_crop, crop);
	if (ret < 0) {
		dev_warn(dev, "%s: failed to crop to %ux%u@%u:%u\n", __func__,
			 rect->width, rect->height, rect->left, rect->top);
		return ret;
	}

	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
	if (ret < 0) {
		dev_warn(dev, "%s: failed to fetch current format\n", __func__);
		return ret;
	}

	ret = dma_align(&mf->width, &mf->height, xlate->host_fmt, pcdev->vb_mode,
			false);
	if (ret < 0) {
		dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
				__func__, mf->width, mf->height,
				xlate->host_fmt->name);
		return ret;
	}

	if (!ret) {
		/* sensor returned geometry not DMA aligned, trying to fix */
		ret = set_format(pcdev, dev, icd, sd, &fmt, xlate);
		if (ret < 0) {
			dev_err(dev, "%s: failed to set format\n", __func__);
			return ret;
		}
	}

	icd->user_width	 = mf->width;
	icd->user_height = mf->height;

	return 0;
}