Exemple #1
0
/*
 * TIM6/DAC update interrupt handler (runs once per timer period).
 *
 * On each update event it toggles PD14 as a heartbeat, logs the four
 * DMA-filled ADC3 channel samples into vector_a..vector_d at index i,
 * mixes the samples into a pseudo-random 12-bit value, pushes that
 * value through four cascaded filter stages (ap_1..ap_4), and writes
 * the result to the DAC while also recording it in sig[i].
 *
 * NOTE(review): "TIM6->SR = 0x0" at the end clears ALL status flags,
 * and does so even when UIF was not set; writing ~TIM_SR_UIF inside
 * the if-block is the usual idiom -- confirm the blanket clear is
 * intentional.
 */
void TIM6_DAC_IRQHandler(void)
{
	//if sr & uif are both enabled
	if(TIM6->SR & TIM_SR_UIF)
	{
		GPIOD->ODR ^= (1<<14); //toggle D14
		/* snapshot the raw ADC channels for offline inspection */
		vector_a[i] = ADC3ConvertedValue[0];
        vector_b[i] = ADC3ConvertedValue[1];
        vector_c[i] = ADC3ConvertedValue[2];
        vector_d[i] = ADC3ConvertedValue[3];
//		outvalue+=ADC3ConvertedValue[0]; //pseudorandom number generation
		/* mix the channels into a pseudo-random value;
		   NOTE(review): if ADC3ConvertedValue[1] can ever be 0 the
		   modulo below is undefined behavior -- confirm the ADC range. */
		outvalue+=ADC3ConvertedValue[0];
		outvalue%=ADC3ConvertedValue[1];
		outvalue+=ADC3ConvertedValue[2];
//		pre_filter = (outvalue%=4096);
		outvalue%=4096; /* keep within the 12-bit DAC range */
//		outvalue+=sig[i];
//		outvalue>>=1;
//		outvalue = (uint32_t)(outvalue*0.5) + (uint32_t)(sig[(VECSIZE+i)%VECSIZE]*0.5);
		/* four cascaded filter stages sharing the same delay buffers */
		outvalue = apply_filter(outvalue, &DINBUF, &DOUTBUF, &ap_1);
		outvalue = apply_filter(outvalue, &DINBUF, &DOUTBUF, &ap_2);
		outvalue = apply_filter(outvalue, &DINBUF, &DOUTBUF, &ap_3);
		outvalue = apply_filter(outvalue, &DINBUF, &DOUTBUF, &ap_4);
//    	DAC1ConvertedValue = (uint16_t)(0.7*outvalue) + (uint16_t)(0.3*sig[(VECSIZE/9+i)%VECSIZE]);
		DAC1ConvertedValue = sig[i] = outvalue;
		/* circular write index over the capture vectors */
		i++;
        i%=VECSIZE;
	}
	TIM6->SR = 0x0;
}
/**
 * Removes and destroys test suites that are not selected or
 * explicitly excluded.
 *
 * TESTS_SUITES keeps only the listed suites; TESTS_SUITES_EXCLUDE
 * drops the listed suites. Either variable may be unset.
 */
static void filter_suites(array_t *loaded)
{
	char *selected = getenv("TESTS_SUITES");
	char *excluded = getenv("TESTS_SUITES_EXCLUDE");

	if (selected)
	{
		apply_filter(loaded, selected, FALSE);
	}
	if (excluded)
	{
		apply_filter(loaded, excluded, TRUE);
	}
}
Exemple #3
0
/*
 * Adaptive-cancellation driver: streams 8 kHz audio from stdin to
 * stdout through the selected adaptive filter.
 *
 *   argv[1] = filter order m    (index argument, bound 8196)
 *   argv[2] = adaption rate mu  (real argument)
 *   argv[3] = delay             (index argument, bound 8196)
 *   argv[4] = mode: 0 = cancel_noise, 1 = cancel_tones
 *
 * Fixes: declared as `int main` (ISO C requires main to return int;
 * `void main` is non-portable), and the mode switch gained a default
 * branch so `f` can never be read uninitialized.
 */
int
main (int argc, char ** argv)
{
  struct f_context c;
  filter f;

  if (argc != 5)
    usage ();
  (c.m) = (convert_index_arg ((argv[1]), 8196));
  (c.delay) = (convert_index_arg ((argv[3]), 8196));
  (c.mu) = (convert_real_arg (argv[2]));
  (c.h) = (xmalloc ((c.m) * (sizeof (REAL))));
  {
    /* zero the filter coefficient/history buffer */
    REAL * sh = (c.h);
    REAL * eh = (sh + (c.m));
    while (sh < eh)
      (*sh++) = 0.;
  }
  switch (convert_index_arg ((argv[4]), 2))
    {
    case 0:
      f = cancel_noise;
      break;
    case 1:
      f = cancel_tones;
      break;
    default:
      /* convert_index_arg is expected to bound the value to {0,1};
         be defensive anyway so f is never used uninitialized. */
      usage ();
      return 1;
    }
  apply_filter ((make_filter_state ((fileno (stdin)),
				    (fileno (stdout)),
				    8000,
				    ((c.m) + (c.delay)))),
		f,
		(&c));
  exit (0);
}
/**
    \fn getNextFrame
    \brief Fetch the next source frame, sharpen it plane by plane into
    *image*, and advance the frame counter.  Returns false at EOF.
*/
bool         Msharpen::getNextFrame(uint32_t *fn,ADMImage *image)
{
    ADMImage *src = vidCache->getImage(nextFrame);
    if (!src)
        return false; // EOF

    ADMImage *blur = blurrImg;
    ADMImage *dst  = image;

    dst->Pts = src->Pts;

    for (int plane = 0; plane < 3; plane++)
    {
        blur_plane(src, blur, plane);
        detect_edges(blur, dst, plane);
        if (_param.highq == true)
            detect_edges_HiQ(blur, dst, plane);
        if (!_param.mask)
            apply_filter(src, blur, dst, plane);
    }

    *fn = nextFrame;
    nextFrame++;
    vidCache->unlockAll();
    return true;
}
/**
    \fn getNextFrame
*/
bool         Msharpen::getNextFrame(uint32_t *fn,ADMImage *image)
{
ADMImage *src,*blur,*dst;
/*
	PVideoFrame src = child->GetFrame(n, env);
	PVideoFrame blur = env->NewVideoFrame(vi);
	PVideoFrame dst = env->NewVideoFrame(vi);
*/
unsigned char *blurp;
unsigned char *dstp;

	dst=image;
	src=vidCache->getImage(nextFrame);
    if(!src)
        return false; // EOF
	blur=blurrImg;
    dst->Pts=src->Pts;
	{
		for (int i=0;i<3;i++)
		{
            blurp=blur->GetReadPtr((ADM_PLANE)i);
			blur_plane(src, blur, blurp ,i);
            dstp=dst->GetWritePtr((ADM_PLANE)i);
			detect_edges(blur, dst, dstp, i);
			if (_param.highq == true)
				detect_edges_HiQ(blur, dst, dstp, i);

			if (!_param.mask) apply_filter(src, blur, dst, dstp, i);
		}
	}
    *fn=nextFrame;
    nextFrame++;
	vidCache->unlockAll();
	return true;
}
Exemple #6
0
/** Timer callback that plays the video at a fixed interval: grabs the
    next frame, runs it through every filter configured in the list
    widget, and displays the result. */
void Ui_MainWindow::timerEvent(QTimerEvent*){
  int i, count, taille;
  /* THAT SUCKS ! But I don't get how opencv manages memory */
  bool should_free = false;
  /* END OF MAJOR SUCKINESS */
  type_filtre filter;
  QString type, param;
  QListWidgetItem* item_i;
  count = listWidget->count();
  current_image = cvQueryFrame(source);
  if (!current_image) {
    stop_playback();   // no more frames: stop the timer/playback
    return;
  }

  update_image_infos();
  fit_window();

  /* Apply every configured filter in list order; "Nouveau" is the
     placeholder entry for a not-yet-configured filter and is skipped. */
  for(i = 0; i < count; i++) {
    item_i = listWidget->item(i);
    if (item_i->text() != "Nouveau") {
      type = item_i->text().section(" ", 0, 0);   // first token: filter id
      filter = filter_of_id(type);
      /* MEDIAN/BILATERAL presumably make apply_filter return a freshly
         allocated image that we own -- TODO confirm the ownership
         contract of apply_filter. */
      if (MEDIAN == filter || BILATERAL == filter)
        should_free = true;
      param = item_i->text().section(" ", 1, 1);  // second token: size parameter
      taille = param.at(0).digitValue();          // single-digit size only
      /* NOTE(review): when several allocating filters are chained, only
         the LAST returned image is released below -- the intermediate
         ones appear to leak. */
      current_image = apply_filter(current_image, filter_of_id(type), taille);
    }
  }
  cvwidget->putImage(current_image);
  if (should_free)
    cvReleaseImage(&current_image);
  return;
}
Exemple #7
0
/* Handle clicks on a ROM-list column header.  A right click pops up the
 * header context menu; a left click selects the clicked column as the
 * sort column (or toggles the sort direction if it already is), then
 * persists the choice and re-applies the current filter. */
static void callback_header_clicked(GtkWidget* widget, GdkEventButton* event, gpointer column)
{
    if(event->type!=GDK_BUTTON_PRESS)
        return;

    if(event->button==3) /* Right click. */
        {
        gtk_menu_popup(GTK_MENU(g_MainWindow.romHeaderMenu), NULL, NULL, NULL, NULL,
                       event->button, event->time);
        }
    else if(event->button==1) /* Left click. */
        {
        int clicked = gtk_tree_view_column_get_sort_column_id(column);

        if(g_MainWindow.romSortColumn==clicked)
            {
            /* Same column: flip the direction. */
            g_MainWindow.romSortType = (g_MainWindow.romSortType==GTK_SORT_ASCENDING)
                                       ? GTK_SORT_DESCENDING : GTK_SORT_ASCENDING;
            }
        else
            {
            /* New column: start ascending. */
            g_MainWindow.romSortColumn = clicked;
            g_MainWindow.romSortType = GTK_SORT_ASCENDING;
            }

        config_put_number("RomSortColumn", g_MainWindow.romSortColumn);
        config_put_number("RomSortType", g_MainWindow.romSortType);
        apply_filter();
        }
}
Exemple #8
0
/* Netfilter POST_ROUTING hook: run the packet through every filter in
 * the post-routing chain.  The first matching filter whose verdict is
 * not NF_ACCEPT decides; otherwise the packet is accepted. */
static unsigned int nf_ip_post_routing_hook(unsigned int hooknum,
		struct sk_buff *skb, const struct net_device *in,
		const struct net_device *out, int(*okfn)(struct sk_buff*)) {
	filter_chain chain = get_chain(POST_ROUTING_CHAIN);
	unsigned int verdict = NF_ACCEPT;
	klist_iterator* it;
	filter* f;

	if (chain.len == 0)
		return NF_ACCEPT;

	it = make_klist_iterator(chain.filters);
	while (klist_iterator_has_next(it)) {
		f = (filter*) (klist_iterator_next(it))->data;
		if (!match_filter(f, skb))
			continue;
		verdict = apply_filter(f, hooknum, skb, in, out, okfn);
		if (verdict != NF_ACCEPT)
			break;	/* first non-accept verdict wins */
	}
	free_klist_iterator(it);

	return verdict;
}
Exemple #9
0
/*
 * RCC worker run method for the symmetric real FIR filter.
 *
 * DATA messages are filtered sample-by-sample through apply_filter()
 * with gain applied and a peak detector updated; SYNC/TIME messages are
 * forwarded unchanged.  When the bypass property is set, input is
 * forwarded without filtering.
 *
 * NOTE(review): `len = byteLen2Real(in->input.length) - UnRoll` is
 * unsigned; if the message carries fewer than UnRoll samples this
 * underflows to a huge count -- confirm a minimum message size is
 * guaranteed upstream.
 */
static RCCResult
run(RCCWorker *self, RCCBoolean timedOut, RCCBoolean *newRunCondition) {
  (void)timedOut;(void)newRunCondition;  

  Sym_fir_realProperties *p = self->properties;
  State *myState = self->memories[0];  

  RCCPort
    *in = &self->ports[SYM_FIR_REAL_IN],
    *out = &self->ports[SYM_FIR_REAL_OUT];

  Sym_fir_realInData
    *inData = in->current.data,
    *outData = out->current.data;

  /* refuse to run rather than overrun the output buffer */
  if (in->input.length > out->current.maxLength) {
    self->container.setError( "output buffer too small" );
    return RCC_ERROR;
  }

  switch( in->input.u.operation ) {

  case SYM_FIR_REAL_IN_DATA:
    {
#ifndef NDEBUG
      printf("%s got %zu bytes of data\n", __FILE__,  in->input.length);
#endif

      if ( p->bypass ) {
	/* bypass: forward the input buffer untouched */
	self->container.send( out, &in->current, in->input.u.operation, in->input.length);
	return RCC_OK;
      }
      else if (in->input.length) {
	unsigned i;
	double gain = Gain( p->gain);
	unsigned len = byteLen2Real(in->input.length) - UnRoll;
	for ( i=0; i<len; i++ ) {
	  double v = apply_filter( myState->taps, &inData->real[i] );
	  /* NOTE(review): the raw |v| is compared against peakDetect but
	     the SCALED value is stored -- confirm the units match. */
	  if ( fabs(v) > p->peakDetect ) {
	    p->peakDetect = Scale( fabs(v) );
	  }
	  outData->real[i] =  Scale( gain * v );
	}
      }
    }
    break;

  case SYM_FIR_REAL_IN_SYNC:
  case SYM_FIR_REAL_IN_TIME:
    /* pass-through for non-data operations */
    self->container.send( out, &in->current, in->input.u.operation, in->input.length);
    return RCC_OK;
    break;

  };

  out->output.length = in->input.length;
  out->output.u.operation = in->input.u.operation;
  return RCC_ADVANCE;
}
Exemple #10
0
/*
 * Draws circles into two float images, Gaussian-blurs the first,
 * merges them by per-pixel maximum, and dumps the result to "data.raw".
 *
 * Fixes over the previous revision:
 *  - data_res was over-allocated by a factor of sizeof(float):
 *    new float[n] allocates n floats, not n bytes.
 *  - memcpy/fwrite used sizeof(data_res), which is the size of the
 *    POINTER (not of a float element), so memcpy copied twice the image
 *    size and overran data1 -- the crash flagged "HERE IS PROBLEM".
 *    Both now use sizeof(float).
 *  - The __LINE__/__FUNCTION__ crash-hunting printouts are removed.
 */
int main(int argc, char *argv[]){

  // Output file for the raw float image
  std::ofstream fout("data.raw",std::ios::binary);
  if( fout.fail() ){ return -1; }

  int width = DATA_WIDTH;
  int height = DATA_HEIGHT;

  // Create image data1 and draw circles
  float* data1 = new float[width*height];
  fill_circle( data1, width, height, (1*width)/3, (5*height)/8, width/4 );
  fill_circle( data1, width, height, (4*width)/5, (6*height)/8, width/5 );
  fill_circle( data1, width, height, (2*width)/3, (1*height)/4, width/6 );

  // Create image data2 and draw a circle
  float* data2 = new float[width*height];
  fill_circle( data2, width, height, width/2, height/2, width/5 );

  // Create a Gaussian filter
  int filt_width = FILT_WIDTH;
  int filt_height = FILT_HEIGHT;
  float* pfilt = new float[filt_width*filt_height];
  fill_gaussian_filter_data(pfilt,filt_width,filt_height);

  // Result image: new[] counts ELEMENTS, so width*height floats suffice
  float* data_res = new float[width*height];

  // Apply Gaussian filter to data1 -> data_res
  apply_filter(data_res, data1, width, height, pfilt, filt_width, filt_height);

  // Copy the filtered result back into data1.
  // sizeof(float), not sizeof(data_res): the latter is the pointer size.
  memcpy(data1, data_res, width*height*sizeof(float));

  // Take the larger pixel value from data1/data2 into data_res
  copy_max_value(data_res, data1, data2, width, height);

  // Save result to file and clean up
  fout.write(reinterpret_cast<char*>(data_res), width*height*sizeof(float));
  if( fout.fail() ){ return -1; }
  fout.close();

  delete[] data1;
  delete[] data2;
  delete[] pfilt;
  delete[] data_res;
  return 0;
}
Exemple #11
0
/*
 * HRTF demo driver: read the named filter, then apply it to the
 * right/left input audio files and write the result.
 *
 *   argv[1] = filter file, argv[2] = right input, argv[3] = left input,
 *   argv[4] = output file.
 *
 * Fixes: argv[1..4] were dereferenced with no argc check (undefined
 * behavior on short invocations), and main had no return value.
 */
int main(int argc, char** argv) {
  /* Guard before touching argv[1..4]. */
  if (argc < 5) {
    return 1;
  }

  char* filter_name = argv[1];
  char* in_audio_file_r = argv[2];
  char* in_audio_file_l = argv[3];
  char* out_audio_file = argv[4];

  read_filter(filter_name);

  /* Fixed demo position: 30 degrees to the left, ear level. */
  int angle = -30; float elevation = 0;

  apply_filter(in_audio_file_r, in_audio_file_l, out_audio_file, angle, elevation);
  return 0;
}
Exemple #12
0
/* Build a synthetic vertical-flick gesture in *t_data: y coordinates
 * come from the canned flick_y table (optionally smoothed through
 * apply_filter when `filter` is non-zero), and every event's x is
 * pinned to the horizontal centre of the panel. */
static void make_flick_y(struct test_data *t_data, int filter)
{
	int idx;

	memset(t_data, 0, sizeof(*t_data));
	memcpy(t_data->y, flick_y, sizeof(flick_y));
	t_data->event_count = sizeof(flick_y) / sizeof(u16);
	t_data->event_index = FIXED_ID;

	if (filter)
		apply_filter(t_data->y, t_data->event_count);

	for (idx = 0; idx < t_data->event_count; idx++) {
		t_data->x[idx] = touch_test_dev->pdata->caps->x_max /2;
		TOUCH_INFO_MSG("[Touch Tester] pos[%4d,%4d]\n", t_data->x[idx], t_data->y[idx]);
	}
}
/* Return context->uri with its query string filtered according to the
 * configured filter type.  The uri is returned untouched when there is
 * no query string or when the regfilter regex fails to compile; a bare
 * trailing '?' is simply dropped. */
static const char *
filter_querystring(struct filter_context *context)
{
	const char *uri = context->uri;
	const char *qs;
	const char *result;

	if (uri == NULL) {
		return NULL;
	}

	qs = strchr(uri, '?');
	if (qs == NULL) {
		return uri;	/* nothing to filter */
	}

	if (qs[1] == '\0') {
		/* "...?": drop the empty query string */
		return truncate_querystring(context->ws, uri, qs);
	}

	if (context->type == regfilter) {
		void *re = compile_regex(context->params.regfilter.regex);

		if (re == NULL) {
			return uri;	/* bad regex: leave uri as-is */
		}
		context->params.regfilter.re = re;
	}

	context->query_string = qs;
	result = apply_filter(context);

	if (context->type == regfilter) {
		VRT_re_fini(context->params.regfilter.re);
	}

	return result;
}
Exemple #14
0
/*
 * Band-pass filter driver: designs a Kaiser band-pass filter from the
 * command-line arguments (low edge, high edge, attenuation -- the edge
 * frequencies are scaled by fr = pi/4000, i.e. relative to the 8 kHz
 * sample rate) and streams stdin through it to stdout.
 *
 * Fix: declared as `int main`; ISO C requires main to return int and
 * `void main` is non-portable.
 */
int
main (int argc, char ** argv)
{
  REAL fr = ((atan2 (1., 1.)) / 1000.);	/* (pi/4) / 1000 */
  struct f_context c;

  if (argc != 4)
    usage ();
  (c.h) =
    (kaiser_bandpass_coefficients (((convert_real_arg (argv[1])) * fr),
				   ((convert_real_arg (argv[2])) * fr),
				   (convert_real_arg (argv[3])),
				   .001,
				   (& (c.m))));
  fprintf (stderr, "Filter order = %d\n", ((c.m) - 1));
  fflush (stderr);
  apply_filter ((make_filter_state ((fileno (stdin)),
				    (fileno (stdout)),
				    8000,
				    (c.m))),
		bandpass,
		(&c));
  exit (0);
}
/* One-dimensional spline filter along a single axis.
 *
 * Runs an in-place IIR spline prefilter of the given `order` over every
 * line of `input` along `axis`, writing results to `output`; a single
 * line buffer backs both directions because the calculation is
 * in-place.  Boundary handling follows `mode`.  Returns 1 on success,
 * 0 when a Python error has been set. */
int NI_SplineFilter1D(PyArrayObject *input, int order, int axis,
                      NI_ExtendMode mode, PyArrayObject *output)
{
    int npoles = 0, more;
    npy_intp kk, lines, len;
    double *buffer = NULL, poles[MAX_SPLINE_FILTER_POLES];
    NI_LineBuffer iline_buffer, oline_buffer;
    NPY_BEGIN_THREADS_DEF;

    /* length of a line along the filtered axis; scalars count as 1 */
    len = PyArray_NDIM(input) > 0 ? PyArray_DIM(input, axis) : 1;
    if (len < 1)
        goto exit;

    /* these are used in the spline filter calculation below: */
    if (get_filter_poles(order, &npoles, poles)) {
        goto exit;
    }

    /* allocate and initialize the line buffer, only a single one is used,
         because the calculation is in-place: */
    lines = -1;
    if (!NI_AllocateLineBuffer(input, axis, 0, 0, &lines, BUFFER_SIZE,
                               &buffer)) {
        goto exit;
    }
    if (!NI_InitLineBuffer(input, axis, 0, 0, lines, buffer,
                           NI_EXTEND_DEFAULT, 0.0, &iline_buffer)) {
        goto exit;
    }
    if (!NI_InitLineBuffer(output, axis, 0, 0, lines, buffer,
                           NI_EXTEND_DEFAULT, 0.0, &oline_buffer)) {
        goto exit;
    }
    /* release the GIL for the numeric work */
    NPY_BEGIN_THREADS;

    /* iterate over all the array lines: */
    do {
        /* copy lines from array to buffer: */
        if (!NI_ArrayToLineBuffer(&iline_buffer, &lines, &more)) {
            goto exit;
        }
        /* iterate over the lines in the buffer: */
        for(kk = 0; kk < lines; kk++) {
            /* get line: */
            double *ln = NI_GET_LINE(iline_buffer, kk);
            /* spline filter (single-sample lines need no filtering): */
            if (len > 1) {
                apply_filter(ln, len, poles, npoles, mode);
            }
        }

        /* copy lines from buffer to array: */
        if (!NI_LineBufferToArray(&oline_buffer)) {
            goto exit;
        }
    } while(more);

 exit:
    NPY_END_THREADS;
    free(buffer);
    return PyErr_Occurred() ? 0 : 1;
}
/* Reconstruct a double-resolution image from `original`:
 * builds horizontal/vertical Prewitt edge approximations of the
 * prepared image, derives a diagonal approximation from their sum, and
 * runs an inverse Haar transformation that combines the averaged image
 * with the three edge images into `result`.  All intermediates are
 * destroyed before returning; `ret` carries the first error seen. */
static image_reconstruct_ERROR_CODE
do_reconstruction(const image_filter_IMG* original,
                  const image_reconstruct_PARAMETERS* params,
                  image_filter_IMG* result)
{
  image_reconstruct_ERROR_CODE ret = IMAGE_RECONSTRUCT_OK;
  image_filter_FILTER row_filter = {0};
  image_filter_FILTER col_filter = {0};
  image_filter_IMG approx_row = {0};
  image_filter_IMG approx_col = {0};
  image_filter_IMG approx_diag = {0};
  image_filter_IMG tmp = {0};
  double row_mask[] = PREWITT_ROW_MASK;
  double col_mask[] = PREWITT_COL_MASK;


  /* NOTE(review): none of the create_* return values are checked; a
     failed allocation here would surface later inside apply_filter. */
  image_filter_create_filter_by_mask(3,
                                     row_mask,
                                     &row_filter);
  image_filter_create_filter_by_mask(3,
                                     col_mask,
                                     &col_filter);
  image_filter_create_image(original->width,
                            original->height,
                            &approx_row);
  image_filter_create_image(original->width,
                            original->height,
                            &tmp);
  image_filter_create_image(original->width,
                            original->height,
                            &approx_col);
  image_filter_create_image(original->width,
                            original->height,
                            &approx_diag);
  /* NOTE(review): the comparison uses IMAGE_AMPLIFIER_OK although ret is
     an image_reconstruct_ERROR_CODE checked against IMAGE_RECONSTRUCT_OK
     below -- verify the two OK constants share the same value. */
  if ((ret = prepare_image(original, params, &tmp))
             != IMAGE_AMPLIFIER_OK)
  {}
  else if ((ret = apply_filter(&tmp,
                               &row_filter,
                               &approx_row)) != IMAGE_RECONSTRUCT_OK)
  {}
  else if ((ret = apply_filter(&tmp,
                               &col_filter,
                               &approx_col)) != IMAGE_RECONSTRUCT_OK)
  {}

  if (ret == IMAGE_RECONSTRUCT_OK)
  {
    haar_transformation_DATA_SOURCE data;

    /* diagonal edges approximated as the sum of row and column edges */
    calculate_sum(&approx_col,
                  &approx_row,
                  NULL,
                  &approx_diag);
 
    data.width = original->width;
    data.height = original->height;
    data.original_image = result->content;
    data.avg_image = tmp.content;
    data.vertical_edges = approx_col.content;
    data.horizontal_edges = approx_row.content;
    data.diagonal_edges = approx_diag.content;

  /*  DUMPER_TRACE_IMAGE(data.avg_image, data.width, data.height);
    DUMPER_TRACE_IMAGE(data.vertical_edges, data.width, data.height);
    DUMPER_TRACE_IMAGE(data.horizontal_edges, data.width, data.height);
    DUMPER_TRACE_IMAGE(data.diagonal_edges, data.width, data.height);
    DUMPER_TRACE_IMAGE(tmp.content, data.width, data.height); */
    /* inverse Haar doubles the output resolution */
    data.width *= 2;
    data.height *= 2;
    haar_transformation_inverse_transformation(data);
 /*   DUMPER_TRACE_IMAGE(data.original_image, data.width, data.height); */
  }

  image_filter_destroy_image(&approx_diag);
  image_filter_destroy_image(&tmp);
  image_filter_destroy_image(&approx_col);
  image_filter_destroy_image(&approx_row);
  image_filter_destroy_filter(&row_filter);
  image_filter_destroy_filter(&col_filter);

  return ret;
}
Exemple #17
0
/* Activate filter widget -> filter and resort.
 * Plain GTK signal trampoline onto the global apply_filter();
 * the widget/data arguments are intentionally unused. */
static void callback_apply_filter(GtkWidget* widget, gpointer data)
{
    apply_filter();
}
Exemple #18
0
/*
 * libpurple receiving-im callback for the microblog plugin.
 *
 * Per incoming IM it: (on win32) suppresses blink notification for
 * known services; strips service-specific "parrot" echo messages for
 * wassr/identi.ca/ffeed; optionally applies the user filter; and for
 * twitter suppresses the "oops" message and (in API-post mode) all
 * incoming IMs.  Clearing *sender/*buffer to NULL tells libpurple to
 * drop the message.  Returns FALSE so other handlers still run.
 *
 * Fix: the final OOPS_MESSAGE check dereferenced *buffer, which can
 * already be NULL at that point (the API-base-post branch above frees
 * it); strstr(NULL, ...) is undefined behavior.  The check is now
 * guarded.
 */
static gboolean
receiving_im_cb(PurpleAccount *account, char **sender, char **buffer,
                PurpleConversation *conv, PurpleMessageFlags *flags, void *data)
{
    twitter_debug("called\n");
    twitter_debug("buffer = %s suppress_oops = %d\n", *buffer, suppress_oops);

    gint service;

    service = get_service_type_by_account(account, *sender);
    twitter_debug("service = %d\n", service);

#ifdef _WIN32
    /* suppress notification of incoming messages. */
    if(service != unknown_service &&
       purple_prefs_get_bool(OPT_PREVENT_NOTIFICATION)) {
        if(!blink_modified) {
            blink_modified = TRUE;
            blink_state = purple_prefs_get_bool(OPT_PIDGIN_BLINK_IM);
            purple_prefs_set_bool(OPT_PIDGIN_BLINK_IM, FALSE);
        }
    }
    else {
        if(blink_modified) {
            purple_prefs_set_bool(OPT_PIDGIN_BLINK_IM, blink_state);
            blink_modified = FALSE;
        }
    }
#endif

    if(service == wassr_service) {
        gchar *stripped = strip_html_markup(*buffer);
        /* suppress annoying completion message from wassr */
        if(strstr(*buffer, "<body>チャンネル投稿完了:")) {
            twitter_debug("clearing channel parrot message\n");
            g_free(*sender); *sender = NULL;
            g_free(*buffer); *buffer = NULL;
        }
        /* discard parrot message */
        else {
            GList *current = g_list_first(wassr_parrot_list);
            while(current) {
                GList *next = g_list_next(current);

                if(strstr(stripped, current->data)) {
                    twitter_debug("parrot clearing: buf = %s post = %s\n",
                                  *buffer, (char *)current->data);
                    g_free(*sender); *sender = NULL;
                    g_free(*buffer); *buffer = NULL;
                    g_free(current->data);
                    current->data = NULL;
                    wassr_parrot_list =
                        g_list_delete_link(wassr_parrot_list, current);
                    break;
                }

                current = next;
            }
        }
        g_free(stripped);
    }
    else if(service == identica_service) {
        /* discard parrot message */
        gchar *stripped = strip_html_markup(*buffer);
        GList *current = g_list_first(identica_parrot_list);
        while(current) {
            GList *next = g_list_next(current);

            if(strstr(stripped, current->data)) {
                twitter_debug("identica parrot clearing: buf = %s post = %s\n",
                              *buffer, (char *)current->data);
                g_free(*sender); *sender = NULL;
                g_free(*buffer); *buffer = NULL;
                g_free(current->data);
                current->data = NULL;
                identica_parrot_list =
                    g_list_delete_link(identica_parrot_list, current);
                break;
            }

            current = next;
        }
        g_free(stripped);
    }
    else if(service == ffeed_service) {
        /* discard parrot message */
        gchar *stripped = strip_html_markup(*buffer);
        GList *current = g_list_first(ffeed_parrot_list);
        while(current) {
            GList *next = g_list_next(current);

            if(strstr(stripped, current->data)) {
                twitter_debug("ffeed parrot clearing: buf = %s post = %s\n",
                              *buffer, (char *)current->data);
                g_free(*sender); *sender = NULL;
                g_free(*buffer); *buffer = NULL;
                g_free(current->data);
                current->data = NULL;
                ffeed_parrot_list =
                    g_list_delete_link(ffeed_parrot_list, current);
                break;
            }

            current = next;
        }
        g_free(stripped);
    }

    /* filtering */
    if(purple_prefs_get_bool(OPT_FILTER)) {
        apply_filter(sender, buffer, flags, service);
    }

    /* return here if it is not twitter */
    if(service != twitter_service) {
        return FALSE;
    }

    /* if we use api, discard all incoming IM messages. */
    if(purple_prefs_get_bool(OPT_API_BASE_POST)) {
        g_free(*sender); *sender = NULL;
        g_free(*buffer); *buffer = NULL;
    }

    if(!suppress_oops || !purple_prefs_get_bool(OPT_SUPPRESS_OOPS))
        return FALSE;

    /* *buffer may have been freed and NULLed above (API-base-post or a
     * parrot match); strstr on NULL is undefined behavior. */
    if(*buffer && strstr(*buffer, OOPS_MESSAGE)) {
        twitter_debug("clearing sender and buffer\n");
        g_free(*sender); *sender = NULL;
        g_free(*buffer); *buffer = NULL;
        suppress_oops = FALSE;
    }
    return FALSE;
}
Exemple #19
0
    // CORE FUNCTION: decode a block of data
    // Takes two overlapping stereo input half-blocks, steers the signal
    // energy across six output channels (L, C, R, Ls, Rs, LFE) in the
    // frequency domain, and adapts the per-bin channel filters toward
    // the detected sound-field position at `adaption_rate`.
    void block_decode(float *input1[2], float *input2[2], float *output[6], float center_width, float dimension, float adaption_rate) {
        // 1. scale the input by the window function; this serves a dual purpose:
        // - first it improves the FFT resolution b/c boundary discontinuities (and their frequencies) get removed
        // - second it allows for smooth blending of varying filters between the blocks
        {
            float* pWnd = &wnd[0];
            float* pLt = &lt[0];
            float* pRt = &rt[0];
            float* pIn0 = input1[0];
            float* pIn1 = input1[1];
            for (unsigned k=0;k<halfN;k++) {
                *pLt++ = *pIn0++ * *pWnd;
                *pRt++ = *pIn1++ * *pWnd++;
            }
            pIn0 = input2[0];
            pIn1 = input2[1];
            for (unsigned k=0;k<halfN;k++) {
                *pLt++ = *pIn0++ * *pWnd;
                *pRt++ = *pIn1++ * *pWnd++;
            }
        }

#ifdef USE_FFTW3
        // ... and tranform it into the frequency domain
        fftwf_execute(loadL);
        fftwf_execute(loadR);
#else
        ff_fft_permuteRC(fftContextForward, lt, (FFTComplex*)&dftL[0]);
        av_fft_calc(fftContextForward, (FFTComplex*)&dftL[0]);

        ff_fft_permuteRC(fftContextForward, rt, (FFTComplex*)&dftR[0]);
        av_fft_calc(fftContextForward, (FFTComplex*)&dftR[0]);
#endif

        // 2. compare amplitude and phase of each DFT bin and produce the X/Y coordinates in the sound field
        //    but dont do DC or N/2 component
        for (unsigned f=0;f<halfN;f++) {           
            // get left/right amplitudes/phases
            float ampL = amplitude(dftL[f]), ampR = amplitude(dftR[f]);
            float phaseL = phase(dftL[f]), phaseR = phase(dftR[f]);
//          if (ampL+ampR < epsilon)
//              continue;       

            // calculate the amplitude/phase difference
            // (epsilon guard avoids dividing by a near-zero total amplitude)
            float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL));
            float phaseDiff = phaseL - phaseR;
            if (phaseDiff < -PI) phaseDiff += 2*PI;
            if (phaseDiff > PI) phaseDiff -= 2*PI;
            phaseDiff = abs(phaseDiff);

            if (linear_steering) {
                // --- this is the fancy new linear mode ---

                // get sound field x/y position
                yfs[f] = get_yfs(ampDiff,phaseDiff);
                xfs[f] = get_xfs(ampDiff,yfs[f]);

                // add dimension control
                yfs[f] = clamp(yfs[f] - dimension);

                // add crossfeed control
                xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2));

                // 3. generate frequency filters for each output channel
                float left = (1-xfs[f])/2, right = (1+xfs[f])/2;
                float front = (1+yfs[f])/2, back = (1-yfs[f])/2;
                float volume[5] = {
                    front * (left * center_width + max(0,-xfs[f]) * (1-center_width)),  // left
                    front * center_level*((1-abs(xfs[f])) * (1-center_width)),          // center
                    front * (right * center_width + max(0, xfs[f]) * (1-center_width)), // right
                    back * surround_level * left,                                       // left surround
                    back * surround_level * right                                       // right surround
                };

                // adapt the prior filter
                for (unsigned c=0;c<5;c++)
                    filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c];

            } else {
                // --- this is the old & simple steering mode ---

                // calculate the amplitude/phase difference
                // NOTE(review): these shadow (and exactly recompute) the
                // ampDiff/phaseDiff already computed above this branch --
                // redundant but harmless; candidate for cleanup.
                float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL));
                float phaseDiff = phaseL - phaseR;
                if (phaseDiff < -PI) phaseDiff += 2*PI;
                if (phaseDiff > PI) phaseDiff -= 2*PI;
                phaseDiff = abs(phaseDiff);

                // determine sound field x-position
                xfs[f] = ampDiff;

                // determine preliminary sound field y-position from phase difference
                yfs[f] = 1 - (phaseDiff/PI)*2;

                if (abs(xfs[f]) > surround_balance) {
                    // blend linearly between the surrounds and the fronts if the balance exceeds the surround encoding balance
                    // this is necessary because the sound field is trapezoidal and will be stretched behind the listener
                    float frontness = (abs(xfs[f]) - surround_balance)/(1-surround_balance);
                    yfs[f]  = (1-frontness) * yfs[f] + frontness * 1; 
                }

                // add dimension control
                yfs[f] = clamp(yfs[f] - dimension);

                // add crossfeed control
                xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2));

                // 3. generate frequency filters for each output channel, according to the signal position
                // the sum of all channel volumes must be 1.0
                float left = (1-xfs[f])/2, right = (1+xfs[f])/2;
                float front = (1+yfs[f])/2, back = (1-yfs[f])/2;
                float volume[5] = {
                    front * (left * center_width + max(0,-xfs[f]) * (1-center_width)),      // left
                    front * center_level*((1-abs(xfs[f])) * (1-center_width)),              // center
                    front * (right * center_width + max(0, xfs[f]) * (1-center_width)),     // right
                    back * surround_level*max(0,min(1,((1-(xfs[f]/surround_balance))/2))),  // left surround
                    back * surround_level*max(0,min(1,((1+(xfs[f]/surround_balance))/2)))   // right surround
                };

                // adapt the prior filter
                for (unsigned c=0;c<5;c++)
                    filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c];
            }

            // ... and build the signal which we want to position
            // (assumes float overloads of abs/max/min are in scope --
            // TODO confirm; the integer C abs() would truncate)
            frontL[f] = polar(ampL+ampR,phaseL);
            frontR[f] = polar(ampL+ampR,phaseR);
            avg[f] = frontL[f] + frontR[f];
            surL[f] = polar(ampL+ampR,phaseL+phase_offsetL);
            surR[f] = polar(ampL+ampR,phaseR+phase_offsetR);
            trueavg[f] = cfloat(dftL[f][0] + dftR[f][0], dftL[f][1] + dftR[f][1]);
        }

        // 4. distribute the unfiltered reference signals over the channels
        apply_filter(&frontL[0],&filter[0][0],&output[0][0]);   // front left
        apply_filter(&avg[0], &filter[1][0],&output[1][0]);     // front center
        apply_filter(&frontR[0],&filter[2][0],&output[2][0]);   // front right
        apply_filter(&surL[0],&filter[3][0],&output[3][0]);     // surround left
        apply_filter(&surR[0],&filter[4][0],&output[4][0]);     // surround right
        apply_filter(&trueavg[0],&filter[5][0],&output[5][0]);  // lfe
    }
/*
 * Filter an N x N region of a BMP image: reads argv[1], filters each
 * component with apply_filter() at position (argv[3], argv[4]) using an
 * N = argv[5] window, and writes the N x N result to argv[2].
 *
 * Fixes: the usage line now documents all five required arguments, and
 * the float -> io_byte conversion on output clamps to [0,255] and
 * rounds (a bare cast truncates and is undefined for out-of-range
 * values -- the "fix it up in the lab" item).
 */
int
  main(int argc, char *argv[])
{
  if (argc != 6)
    {
      fprintf(stderr,
              "Usage: %s <in bmp file> <out bmp file> <col> <row> <N>\n",
              argv[0]);
      return -1;
    }

  int input_c = atoi(argv[3]);
  int input_r = atoi(argv[4]);
  int N = atoi(argv[5]);



  int err_code=0;
  try {
      // Read the input image
      bmp_in in;
      if ((err_code = bmp_in__open(&in,argv[1])) != 0)
        throw err_code;

      int width = in.cols, height = in.rows;
      int n, num_comps = in.num_components;
      my_image_comp *input_comps = new my_image_comp[num_comps];
      for (n=0; n < num_comps; n++)
        input_comps[n].init(height,width,N/2); // border of N/2 for the filter support
      
      int r; // Declare row index
      io_byte *line = new io_byte[width*num_comps];
      for (r=height-1; r >= 0; r--)
        { // "r" holds the true row index we are reading, since the image is
          // stored upside down in the BMP file.
          if ((err_code = bmp_in__get_line(&in,line)) != 0)
            throw err_code;
          for (n=0; n < num_comps; n++)
            {
              io_byte *src = line+n; // Points to first sample of component n
              float *dst = input_comps[n].buf + r * input_comps[n].stride;
              for (int c=0; c < width; c++, src+=num_comps)
                dst[c] = (float) *src; // byte -> float is always exact
            }
        }
      bmp_in__close(&in);

      // Allocate storage for the filtered output
      my_image_comp *output_comps = new my_image_comp[num_comps];
      for (n=0; n < num_comps; n++)
        output_comps[n].init(N,N,0); // Don't need a border for output

      // Process the image, all in floating point (easy)
      for (n=0; n < num_comps; n++)
        input_comps[n].perform_boundary_extension();
      for (n=0; n < num_comps; n++)
        apply_filter(input_comps+n,output_comps+n,N,input_c,input_r);

      // Write the image back out again.
      // NOTE(review): `line` (width*num_comps bytes) is reused for the
      // N-wide output rows -- assumes N <= width.
      bmp_out out;
      if ((err_code = bmp_out__open(&out,argv[2],N,N,num_comps)) != 0)
        throw err_code;
      for (r=N-1; r >= 0; r--)
        { // "r" holds the true row index we are writing, since the image is
          // written upside down in BMP files.
          for (n=0; n < num_comps; n++)
            {
              io_byte *dst = line+n; // Points to first sample of component n
              float *src = output_comps[n].buf + r * output_comps[n].stride;
              for (int c=0; c < N; c++, dst+=num_comps)
                {
                  // Clamp then round: a bare (io_byte) cast truncates
                  // and is undefined for values outside the byte range.
                  float v = src[c];
                  if (v < 0.0F)
                    v = 0.0F;
                  else if (v > 255.0F)
                    v = 255.0F;
                  *dst = (io_byte)(v + 0.5F);
                }
            }
          bmp_out__put_line(&out,line);
        }
      bmp_out__close(&out);
      delete[] line;
      delete[] input_comps;
      delete[] output_comps;
    }
  catch (int exc) {
      if (exc == IO_ERR_NO_FILE)
        fprintf(stderr,"Cannot open supplied input or output file.\n");
      else if (exc == IO_ERR_FILE_HEADER)
        fprintf(stderr,"Error encountered while parsing BMP file header.\n");
      else if (exc == IO_ERR_UNSUPPORTED)
        fprintf(stderr,"Input uses an unsupported BMP file format.\n  Current "
                "simple example supports only 8-bit and 24-bit data.\n");
      else if (exc == IO_ERR_FILE_TRUNC)
        fprintf(stderr,"Input or output file truncated unexpectedly.\n");
      else if (exc == IO_ERR_FILE_NOT_OPEN)
        fprintf(stderr,"Trying to access a file which is not open!(?)\n");
      return -1;
    }
  return 0;
}
Exemple #21
0
// MAIN PROGRAM
// MAIN PROGRAM
//
// Gaussian-blur driver: reads an input/output file name plus the kernel
// radius and sigma (from argv or interactively), loads the image, builds the
// Gaussian filter, applies it, and writes the result to disk.
// Returns 0 on success, 1 on invalid input or a failed load/create step.
int main(int argc, char *argv[]) {

	//The image that is going to be blurred
	IMAGE *image = NULL;

	//The resulting image
	IMAGE *result = NULL;

	//The used filter
	FILTER *filter = NULL;

	//Info (initialized so a failed conversion can never leave them
	//holding indeterminate values)
	char image_file_name[50];
	char result_file_name[50];
	int radius = 0;
	double sigma = 0.0;

	//Arguments: argv[0]="path", argv[1]="image_name.ppm", argv[2]="result_image_name.ppm" argv[3]="radius" argv[4]="sigma"
	if(argc == 5) {	//If enough arguments given take the info from them
		//snprintf (unlike strcpy) cannot overflow the destination and
		//always NUL-terminates, even for over-long argv strings.
		snprintf(image_file_name, sizeof image_file_name, "%s", argv[1]);
		snprintf(result_file_name, sizeof result_file_name, "%s", argv[2]);

		//Convert radius and sigma.  NOTE(review): atoi/atof report no
		//conversion errors (malformed input silently yields 0); kept
		//for interface compatibility.
		radius = atoi(argv[3]);
		sigma = atof(argv[4]);
	} else { //Read info from keyboard
		//%49s bounds each read to the buffer capacity (49 chars + NUL),
		//and every scanf result is checked so we never use a value that
		//was not actually read.
		printf("Original image name: ");
		if (scanf("%49s", image_file_name) != 1) {
			fprintf(stderr, "Invalid input\n");
			return 1;
		}

		printf("Result image name: ");
		if (scanf("%49s", result_file_name) != 1) {
			fprintf(stderr, "Invalid input\n");
			return 1;
		}

		printf("Radius: ");
		if (scanf("%d", &radius) != 1) {
			fprintf(stderr, "Invalid radius\n");
			return 1;
		}

		printf("Sigma: ");
		if (scanf("%lf", &sigma) != 1) {
			fprintf(stderr, "Invalid sigma\n");
			return 1;
		}
	}

	//Load image
	printf("Loading image...\n");
	image = image_load(image_file_name);
	if (image == NULL) {
		fprintf(stderr, "Cannot load image '%s'\n", image_file_name);
		return 1;
	}

	//Create filter
	printf("Creating filter...\n");
	filter = filter_create_gauss(radius, sigma);
	if (filter == NULL) {
		fprintf(stderr, "Cannot create Gaussian filter\n");
		image_free(image);
		return 1;
	}

	//Apply filter
	printf("Appling filter...\n");
	result = apply_filter(image, filter);

	//Write image to disk
	printf("Writing image to disk...\n");
	image_write(result, result_file_name);

	//Free memory
	image_free(image);
	image_free(result);
	filter_free(filter);

	printf("DONE!\n");

	return 0;
}
Exemple #22
0
/**
 * Producer thread for one filter stage of the image pipeline.
 *
 * @param params  a prod_param* carrying this stage's filter index
 *                (num_filter) and the blur flags (isblur, beforeblur).
 * @return never returns a value; the thread ends via pthread_exit(NULL)
 *         once all NPACK packets of this stage have been claimed.
 */
void *productor(void *params) {
	prod_param *parg = (prod_param *) params;
	int err;
	
	// Three cases: this stage IS a blur, this stage is right BEFORE a blur,
	// or the normal case.  If we are before a blur, the image copy must be
	// triggered once mat_blur shows, for that image, the number of packets it
	// was split into.  This needs either a global array npackim[NIMAGE]
	// giving, for each image, how many packets it was split into, or
	// splitting every image into NPACK/NIMAGE packets (dividing each image by
	// the lcm of the thread count), so the per-image packet count follows
	// from the total packet count.
	// These three cases are handled inside the while loop.
	while(1) {

		// Atomically claim one of the NPACK work units for this stage; exit
		// the thread once all packets have been handed out.
		err = pthread_mutex_lock(&(to_pass_mutex[parg->num_filter]));
		if (err!=0)
			error(err,"pthread_mutex_lock pour to_pass_mutex dans producteur");
		if(to_pass[parg->num_filter]>=NPACK){
			err = pthread_mutex_unlock(&(to_pass_mutex[parg->num_filter]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour to_pass_mutex dans producteur");
			pthread_exit(NULL);
		}
		to_pass[parg->num_filter]++;
		err = pthread_mutex_unlock(&(to_pass_mutex[parg->num_filter]));
		if (err!=0)
			error(err,"pthread_mutex_unlock pour to_pass_mutex dans producteur");

		printf(".");
		fflush(stdout);
		// Case 1: this stage is a blur filter.
		if((parg->isblur)!=0) {
			
			// Step 1: check that one of the mat_blur entries in row
			// isblur-1 is ready.
			//printf("Je suis dans le cas blur\n");
			//err = pthread_mutex_lock(&(check_for_c_mutex[(parg->isblur)-1])); // two threads must not search for a c at the same time, otherwise they would copy the same image

			err = sem_wait(&(can_copi[(parg->isblur)-1])); 
			if (err!=0)
				error(err,"sem_wait sur can_copi dans productor");

			err = pthread_mutex_lock(&(can_i_take_c_mutex[(parg->isblur)-1]));// This lock is necessary because the semaphore only blocks threads from entering when no image is ready for copying.  But the semaphore can be incremented because two different images may become ready for copying almost simultaneously.  So we must hold can_i_take_c_mutex to keep two threads from entering check_lmat_blur at the same time, which would be a problem since one thread could read can_i_take_c while the other writes to it.
			if (err!=0)
				error(err,"pthread_mutex_lock pour can_i_take_c_mutex dans producteur du cas blur");

			int c = check_lmat_blur((parg->isblur)-1);
			if(c==-1) {
				error(-1,"check_lmat_blur a renvoyé -1");
			}
			
			// Re-post the semaphore while packets of image c remain to be
			// taken, so other producer threads can still pick them up.
			if (can_i_take_c[(parg->isblur)-1][c] < NPACK/NIMAGE) {
				err = sem_post(&(can_copi[(parg->isblur)-1])); 
				if (err!=0)
					error(err,"sem_post sur can_copi dans productor");
			}
			
			err = pthread_mutex_unlock(&(can_i_take_c_mutex[(parg->isblur)-1]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour can_i_take_c_mutex dans producteur du cas blur");

			//	error(err,"pthread_mutex_unlock pour check_for_c_mutex dans producteur du cas blur");

			// Step 2: we have an image whose previous filter has been
			// applied to all its parts.  Copy it (only once, guarded by
			// copi_is_done).

			err = pthread_mutex_lock(&(copi_is_done_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour copi_is_done_mutex dans producteur du cas blur pour image_copi");

			if(copi_is_done[(parg->isblur)-1][c]==0) {

				err = image_copi(c,parg->num_filter); // We pass the image number and the mat_buf row where this image's parts live.  The read-only fields of the elem_buf pointers in that row must be updated.  (Note: one copy structure per image is needed but no more — when a second copy is made for a second blur, the old copy can be overwritten.)
				if (err!=0) {
					error(err,"Erreur de image_copi");
				}
				copi_is_done[(parg->isblur)-1][c]++;				
			}
			err = pthread_mutex_unlock(&(copi_is_done_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour copi_is_done_mutex dans producteur du cas blur pour image_copi");

			// Step 3: the copies are done.  Now ask the consumer for an
			// elem_buf belonging to image c.
			elem_buf *paquet = consommator(parg->num_filter,c);// passing c as the 2nd argument restricts consumption to elem_bufs of image c — NOTE(review): the original comment here claimed "any image is fine", which looks copy-pasted from the -1 case below; confirm against consommator
			apply_filter(paquet,parg->num_filter);
			
			err = sem_wait(&(empty[(parg->num_filter)+1])); // possible code improvement: this sem_wait is not really useful, since we know anyway that the buffers will never fill up
			if (err!=0)
				error(err,"sem_wait sur empty dans productor");
			
			err = pthread_mutex_lock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour buf_mutex dans producteur");
			
			insert_elem(&paquet,(parg->num_filter)+1);
			
			err = pthread_mutex_lock(&(choosen_c_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour choosen_c_mutex dans producteur, check_lmat_blur");

			choosen_c[(parg->isblur)-1][c]++;

			// Once every packet of image c has gone through this blur, its
			// working copy can be destroyed.
			if((choosen_c[(parg->isblur)-1][c]==NPACK/NIMAGE)) {
				err = image_copi_destroy(c,parg->num_filter+1);

				if (err!=0)
					error(err,"Erreur de image_copi_destroy");
			}

			err = pthread_mutex_unlock(&(choosen_c_mutex[(parg->isblur)-1][c]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour choosen_c_mutex dans producteur");
			
			err = pthread_mutex_unlock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour buf_mutex dans producteur");
			
			err = sem_post(&(full[(parg->num_filter)+1])); // possible code improvement: this sem_post is not really useful, since we know anyway that the buffers will never fill up
			if (err!=0)
				error(err,"sem_post sur full dans productor");
			
			// Case 2: this stage also comes right before a blur filter.
			if((parg->beforeblur)!=0) {// parg->beforeblur holds 1 if the next blur is the first one, 2 if it is the second, ...

				err = pthread_mutex_lock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_lock pour blur_mutex dans producteur");
				
				(mat_blur[(parg->beforeblur)-1][(paquet)->num_img])++;

				// All packets of this image have passed this stage: signal
				// the next blur that the image may now be copied.
				if ((mat_blur[(parg->beforeblur)-1][(paquet)->num_img])==NPACK/NIMAGE) {
					err = sem_post(&(can_copi[(parg->beforeblur)-1])); 
					if (err!=0)
						error(err,"sem_post sur can_copi dans productor, before_blur");
				}
				
				err = pthread_mutex_unlock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_unlock pour blur_mutex dans producteur");
			}
			
		} else {
		// Case 3: normal (non-blur) stage.

			elem_buf *paquet = consommator(parg->num_filter,-1);// passing -1 as the 2nd argument means any elem_buf is acceptable, whatever its image
			apply_filter(paquet,parg->num_filter);
			err = sem_wait(&(empty[(parg->num_filter)+1])); // possible code improvement: this sem_wait is not really useful, since we know anyway that the buffers will never fill up
			if (err!=0)
				error(err,"sem_wait sur empty dans productor");
			err = pthread_mutex_lock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_lock pour buf_mutex dans producteur");
			insert_elem(&paquet,(parg->num_filter)+1);
			err = pthread_mutex_unlock(&(buf_mutex[(parg->num_filter)+1]));
			if (err!=0)
				error(err,"pthread_mutex_unlock pour buf_mutex dans producteur");
			
			err = sem_post(&(full[(parg->num_filter)+1])); // possible code improvement: this sem_post is not really useful, since we know anyway that the buffers will never fill up
			if (err!=0)
				error(err,"sem_post sur full dans productor");
			
			// Case 2 again: this stage comes right before a blur filter.
			if((parg->beforeblur)!=0) {// parg->beforeblur holds 1 if the next blur is the first one, 2 if it is the second, ...

				err = pthread_mutex_lock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_lock pour blur_mutex dans producteur");
				
				(mat_blur[(parg->beforeblur)-1][(paquet)->num_img])++;

				// All packets of this image have passed this stage: signal
				// the next blur that the image may now be copied.
				if ((mat_blur[(parg->beforeblur)-1][(paquet)->num_img])==NPACK/NIMAGE) {
					err = sem_post(&(can_copi[(parg->beforeblur)-1])); 
					if (err!=0)
						error(err,"sem_post sur can_copi dans productor, before_blur");
				}

				err = pthread_mutex_unlock(&(blur_mutex[(parg->beforeblur)-1][(paquet)->num_img]));
				if (err!=0)
					error(err,"pthread_mutex_unlock pour blur_mutex dans producteur");
			}
		}
	}
}