Example #1
// Implement row loopfiltering for each thread.
static void loop_filter_rows_mt(const YV12_BUFFER_CONFIG *const frame_buffer,
                                VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                int start, int stop, int y_only,
                                VP9LfSync *const lf_sync, int num_lf_workers) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  int r, c;  // SB row and col
  const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;

  for (r = start; r < stop; r += num_lf_workers) {
    const int mi_row = r << MI_BLOCK_SIZE_LOG2;
    MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;

    for (c = 0; c < sb_cols; ++c) {
      const int mi_col = c << MI_BLOCK_SIZE_LOG2;
      LOOP_FILTER_MASK lfm;
      int plane;

      sync_read(lf_sync, r, c);

      vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col);
      vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);

      for (plane = 0; plane < num_planes; ++plane) {
        vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm);
      }

      sync_write(lf_sync, r, c, sb_cols);
    }
  }
}
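Note: the sync_read()/sync_write() pair above enforces a wavefront dependency between superblock rows: a worker may not filter superblock (r, c) until the worker handling row r-1 has finished column c+1. The sketch below shows one way such primitives can be built with pthreads, assuming a synchronization distance of one superblock column; the names (row_sync, row_sync_read, row_sync_write) are illustrative, not the actual VP9LfSync code in libvpx.

#include <pthread.h>

/* Illustrative row-wavefront synchronization (not libvpx's VP9LfSync).
 * cur_col[r] holds the last superblock column that row r has finished;
 * it must be reset to -1 for every row before each frame. */
typedef struct {
  pthread_mutex_t *mutex;  /* one mutex/cond pair per superblock row */
  pthread_cond_t *cond;
  int *cur_col;
} row_sync;

static void row_sync_read(row_sync *s, int r, int c) {
  if (r == 0) return;  /* the top row has no dependency */
  pthread_mutex_lock(&s->mutex[r - 1]);
  /* Block until the row above has filtered at least column c + 1. */
  while (s->cur_col[r - 1] < c + 1)
    pthread_cond_wait(&s->cond[r - 1], &s->mutex[r - 1]);
  pthread_mutex_unlock(&s->mutex[r - 1]);
}

static void row_sync_write(row_sync *s, int r, int c, int sb_cols) {
  pthread_mutex_lock(&s->mutex[r]);
  /* Publish progress; mark the whole row done once the last column finishes. */
  s->cur_col[r] = (c == sb_cols - 1) ? sb_cols : c;
  pthread_cond_broadcast(&s->cond[r]);
  pthread_mutex_unlock(&s->mutex[r]);
}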
Example #2
void load_file( const char *path, const char *mode, double *buff, size_t size )
{
    FILE *fp;

    fp = fgls_fopen( path, mode );
    sync_read( buff, fp, size, 0 );
    fclose( fp );
}
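Here sync_read() appears to be the project's blocking read helper for arrays of doubles. Under that assumption (size counted in doubles, as the double *buff parameter suggests), a plain-stdio equivalent would look roughly like this; load_file_stdio is an illustrative name, not part of the project.

#include <stdio.h>
#include <stdlib.h>

/* Plain-stdio equivalent of load_file() above, assuming sync_read() simply
 * performs a blocking read of `size` doubles from the start of the file. */
static void load_file_stdio(const char *path, double *buff, size_t size)
{
    FILE *fp = fopen(path, "rb");
    if (fp == NULL) {
        perror(path);
        exit(EXIT_FAILURE);
    }
    if (fread(buff, sizeof(double), size, fp) != size) {
        fprintf(stderr, "%s: short read\n", path);
        exit(EXIT_FAILURE);
    }
    fclose(fp);
}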
Example #3
static int loop_filter_row_worker(AV1LfSync *const lf_sync,
                                  LFWorkerData *const lf_data) {
  const int num_planes = lf_data->y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols =
      mi_cols_aligned_to_sb(lf_data->cm) >> lf_data->cm->mib_size_log2;
  int mi_row, mi_col;
#if !CONFIG_EXT_PARTITION_TYPES
  enum lf_path path = get_loop_filter_path(lf_data->y_only, lf_data->planes);
#endif  // !CONFIG_EXT_PARTITION_TYPES

#if CONFIG_EXT_PARTITION
  printf(
      "STOPPING: This code has not been modified to work with the "
      "extended coding unit size experiment");
  exit(EXIT_FAILURE);
#endif  // CONFIG_EXT_PARTITION

  for (mi_row = lf_data->start; mi_row < lf_data->stop;
       mi_row += lf_sync->num_workers * lf_data->cm->mib_size) {
    MODE_INFO **const mi =
        lf_data->cm->mi_grid_visible + mi_row * lf_data->cm->mi_stride;

    for (mi_col = 0; mi_col < lf_data->cm->mi_cols;
         mi_col += lf_data->cm->mib_size) {
      const int r = mi_row >> lf_data->cm->mib_size_log2;
      const int c = mi_col >> lf_data->cm->mib_size_log2;
#if !CONFIG_EXT_PARTITION_TYPES
      LOOP_FILTER_MASK lfm;
#endif
      int plane;

      sync_read(lf_sync, r, c);

      av1_setup_dst_planes(lf_data->planes, lf_data->cm->sb_size,
                           lf_data->frame_buffer, mi_row, mi_col);
#if CONFIG_EXT_PARTITION_TYPES
      for (plane = 0; plane < num_planes; ++plane) {
        av1_filter_block_plane_non420_ver(lf_data->cm, &lf_data->planes[plane],
                                          mi + mi_col, mi_row, mi_col, plane);
        av1_filter_block_plane_non420_hor(lf_data->cm, &lf_data->planes[plane],
                                          mi + mi_col, mi_row, mi_col, plane);
      }
#else
      av1_setup_mask(lf_data->cm, mi_row, mi_col, mi + mi_col,
                     lf_data->cm->mi_stride, &lfm);

      for (plane = 0; plane < num_planes; ++plane) {
        loop_filter_block_plane_ver(lf_data->cm, lf_data->planes, plane,
                                    mi + mi_col, mi_row, mi_col, path, &lfm);
        loop_filter_block_plane_hor(lf_data->cm, lf_data->planes, plane,
                                    mi + mi_col, mi_row, mi_col, path, &lfm);
      }
#endif  // CONFIG_EXT_PARTITION_TYPES
      sync_write(lf_sync, r, c, sb_cols);
    }
  }
  return 1;
}
Example #4
// Implement row loopfiltering for each thread.
static INLINE
void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
                             VP9_COMMON *const cm,
                             struct macroblockd_plane planes[MAX_MB_PLANE],
                             int start, int stop, int y_only,
                             VP9LfSync *const lf_sync) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
  int mi_row, mi_col;
  enum lf_path path;
  if (y_only)
    path = LF_PATH_444;
  else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1)
    path = LF_PATH_420;
  else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0)
    path = LF_PATH_444;
  else
    path = LF_PATH_SLOW;

  for (mi_row = start; mi_row < stop;
       mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) {
    MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;

    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
      const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
      const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
      LOOP_FILTER_MASK lfm;
      int plane;

      sync_read(lf_sync, r, c);

      vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);

      // TODO(JBB): Make setup_mask work for non 420.
      vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
                     &lfm);

      vp9_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
      for (plane = 1; plane < num_planes; ++plane) {
        switch (path) {
          case LF_PATH_420:
            vp9_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
            break;
          case LF_PATH_444:
            vp9_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
            break;
          case LF_PATH_SLOW:
            vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                          mi_row, mi_col);
            break;
        }
      }

      sync_write(lf_sync, r, c, sb_cols);
    }
  }
}
Example #5
#include <stdio.h>	/* printf */

int main()
{
	int state;
	char ans[11];	/* 10 data bytes plus the terminating '\0' stored below */

	state = sync_init("file.txt", 4096*2, "log.txt");
	if (state != 0)
	{
		printf("sync_init fail\n");
		return -1;
	}

	state = sync_read(ans, 10, 4095);
	if (state != 0)
	{	
		printf("sync_read fail\n");
		return -1;
	}
	ans[10] = '\0';
	printf("read answer: %s\n", ans);

	state = sync_write(ans, 10, 1);
	if (state != 0)
	{	
		printf("sync_write fail\n");
		return -1;
	}
	state = sync_read(ans, 10, 1);
	if (state != 0)
	{	
		printf("sync_read fail\n");
		return -1;
	}
	ans[10] = '\0';
	printf("read answer: %s\n", ans);

	state = sync_exit();
	if (state != 0)
	{	
		printf("sync_exit fail\n");
		return -1;
	}

	return 0;
}
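The prototypes below are inferred purely from how the functions are called in this example (buffer, length, offset for the I/O calls; path, buffer size, log path for init); the library's real header may declare them differently.

/* Prototypes inferred from the calls above; illustrative only. */
int sync_init(const char *path, size_t buffer_size, const char *log_path);
int sync_read(char *buf, size_t len, size_t offset);
int sync_write(const char *buf, size_t len, size_t offset);
int sync_exit(void);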
Example #6
/* Load root catalogue.  We build registry entry by hand.  */
void root_init(int new_p) {
//	bc_check("checking root 1", r_db);
	if(r_c)
		memset(r_c, 0, NPAGES);
	else if(!(r_c = ppn_to_phys(palloc(-1, NPAGES))))
                fatal(Cannot fail);
//	bc_check("checking root 2", r_db);

	/* Read in the old root catalogue. */
	if(!new_p)
#ifdef EXOPC
	        assert (0);
#else		
		sync_read(r_c, r_db, R_NBLOCKS);
#endif
	/* Initialize a new root catalogue */
	else {
Example #7
static int loop_filter_hor_row_worker(AV1LfSync *const lf_sync,
                                      LFWorkerData *const lf_data) {
  const int num_planes = lf_data->y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols =
      mi_cols_aligned_to_sb(lf_data->cm) >> lf_data->cm->mib_size_log2;
  int mi_row, mi_col;
#if !CONFIG_EXT_PARTITION_TYPES
  enum lf_path path = get_loop_filter_path(lf_data->y_only, lf_data->planes);
#endif

  for (mi_row = lf_data->start; mi_row < lf_data->stop;
       mi_row += lf_sync->num_workers * lf_data->cm->mib_size) {
    MODE_INFO **const mi =
        lf_data->cm->mi_grid_visible + mi_row * lf_data->cm->mi_stride;

    for (mi_col = 0; mi_col < lf_data->cm->mi_cols;
         mi_col += lf_data->cm->mib_size) {
      const int r = mi_row >> lf_data->cm->mib_size_log2;
      const int c = mi_col >> lf_data->cm->mib_size_log2;
      LOOP_FILTER_MASK lfm;
      int plane;

      // TODO: For better parallelization, reorder
      // the outer loop to column-based and remove the synchronizations here.
      sync_read(lf_sync, r, c);

      av1_setup_dst_planes(lf_data->planes, lf_data->cm->sb_size,
                           lf_data->frame_buffer, mi_row, mi_col);
      av1_setup_mask(lf_data->cm, mi_row, mi_col, mi + mi_col,
                     lf_data->cm->mi_stride, &lfm);
#if CONFIG_EXT_PARTITION_TYPES
      for (plane = 0; plane < num_planes; ++plane)
        av1_filter_block_plane_non420_hor(lf_data->cm, &lf_data->planes[plane],
                                          mi + mi_col, mi_row, mi_col, plane);
#else
      for (plane = 0; plane < num_planes; ++plane)
        loop_filter_block_plane_hor(lf_data->cm, lf_data->planes, plane,
                                    mi + mi_col, mi_row, mi_col, path, &lfm);
#endif
      sync_write(lf_sync, r, c, sb_cols);
    }
  }
  return 1;
}
Example #8
File: rtl_fm.c  Project: howard0su/rtl-sdr
int main(int argc, char **argv)
{
#ifndef _WIN32
	struct sigaction sigact;
#endif
	struct fm_state fm; 
	char *filename = NULL;
	int n_read, r, opt, wb_mode = 0;
	int i, gain = AUTO_GAIN; // tenths of a dB
	uint8_t *buffer;
	uint32_t dev_index = 0;
	int device_count;
	int ppm_error = 0;
	char vendor[256], product[256], serial[256];
	fm_init(&fm);
	pthread_cond_init(&data_ready, NULL);
	pthread_rwlock_init(&data_rw, NULL);
	pthread_mutex_init(&data_mutex, NULL);

	while ((opt = getopt(argc, argv, "d:f:g:s:b:l:o:t:r:p:EFA:NWMULRDCh")) != -1) {
		switch (opt) {
		case 'd':
			dev_index = atoi(optarg);
			break;
		case 'f':
			if (fm.freq_len >= FREQUENCIES_LIMIT) {
				break;}
			if (strchr(optarg, ':'))
				{frequency_range(&fm, optarg);}
			else
			{
				fm.freqs[fm.freq_len] = (uint32_t)atofs(optarg);
				fm.freq_len++;
			}
			break;
		case 'g':
			gain = (int)(atof(optarg) * 10);
			break;
		case 'l':
			fm.squelch_level = (int)atof(optarg);
			break;
		case 's':
			fm.sample_rate = (uint32_t)atofs(optarg);
			break;
		case 'r':
			fm.output_rate = (int)atofs(optarg);
			break;
		case 'o':
			fm.post_downsample = (int)atof(optarg);
			if (fm.post_downsample < 1 || fm.post_downsample > MAXIMUM_OVERSAMPLE) {
				fprintf(stderr, "Oversample must be between 1 and %i\n", MAXIMUM_OVERSAMPLE);}
			break;
		case 't':
			fm.conseq_squelch = (int)atof(optarg);
			if (fm.conseq_squelch < 0) {
				fm.conseq_squelch = -fm.conseq_squelch;
				fm.terminate_on_squelch = 1;
			}
			break;
		case 'p':
			ppm_error = atoi(optarg);
			break;
		case 'E':
			fm.edge = 1;
			break;
		case 'F':
			fm.fir_enable = 1;
			break;
		case 'A':
			if (strcmp("std",  optarg) == 0) {
				fm.custom_atan = 0;}
			if (strcmp("fast", optarg) == 0) {
				fm.custom_atan = 1;}
			if (strcmp("lut",  optarg) == 0) {
				atan_lut_init();
				fm.custom_atan = 2;}
			break;
		case 'D':
			fm.deemph = 1;
			break;
		case 'C':
			fm.dc_block = 1;
			break;
		case 'N':
			fm.mode_demod = &fm_demod;
			break;
		case 'W':
			wb_mode = 1;
			fm.mode_demod = &fm_demod;
			fm.sample_rate = 170000;
			fm.output_rate = 32000;
			fm.custom_atan = 1;
			fm.post_downsample = 4;
			fm.deemph = 1;
			fm.squelch_level = 0;
			break;
		case 'M':
			fm.mode_demod = &am_demod;
			break;
		case 'U':
			fm.mode_demod = &usb_demod;
			break;
		case 'L':
			fm.mode_demod = &lsb_demod;
			break;
		case 'R':
			fm.mode_demod = &raw_demod;
			break;
		case 'h':
		default:
			usage();
			break;
		}
	}
	/* quadruple sample_rate to limit Δθ to ±π/2 */
	fm.sample_rate *= fm.post_downsample;

	if (fm.freq_len == 0) {
		fprintf(stderr, "Please specify a frequency.\n");
		exit(1);
	}

	if (fm.freq_len >= FREQUENCIES_LIMIT) {
		fprintf(stderr, "Too many channels, maximum %i.\n", FREQUENCIES_LIMIT);
		exit(1);
	}

	if (fm.freq_len > 1 && fm.squelch_level == 0) {
		fprintf(stderr, "Please specify a squelch level.  Required for scanning multiple frequencies.\n");
		exit(1);
	}

	if (fm.freq_len > 1) {
		fm.terminate_on_squelch = 0;
	}

	if (argc <= optind) {
		filename = "-";
	} else {
		filename = argv[optind];
	}

	ACTUAL_BUF_LENGTH = lcm_post[fm.post_downsample] * DEFAULT_BUF_LENGTH;
	buffer = malloc(ACTUAL_BUF_LENGTH * sizeof(uint8_t));

	device_count = rtlsdr_get_device_count();
	if (!device_count) {
		fprintf(stderr, "No supported devices found.\n");
		exit(1);
	}

	fprintf(stderr, "Found %d device(s):\n", device_count);
	for (i = 0; i < device_count; i++) {
		rtlsdr_get_device_usb_strings(i, vendor, product, serial);
		fprintf(stderr, "  %d:  %s, %s, SN: %s\n", i, vendor, product, serial);
	}
	fprintf(stderr, "\n");

	fprintf(stderr, "Using device %d: %s\n",
		dev_index, rtlsdr_get_device_name(dev_index));

	r = rtlsdr_open(&dev, dev_index);
	if (r < 0) {
		fprintf(stderr, "Failed to open rtlsdr device #%d.\n", dev_index);
		exit(1);
	}
#ifndef _WIN32
	sigact.sa_handler = sighandler;
	sigemptyset(&sigact.sa_mask);
	sigact.sa_flags = 0;
	sigaction(SIGINT, &sigact, NULL);
	sigaction(SIGTERM, &sigact, NULL);
	sigaction(SIGQUIT, &sigact, NULL);
	sigaction(SIGPIPE, &sigact, NULL);
#else
	SetConsoleCtrlHandler( (PHANDLER_ROUTINE) sighandler, TRUE );
#endif

	/* WBFM is special */
	// I really should loop over everything
	// but you are more wrong for scanning broadcast FM
	if (wb_mode) {
		fm.freqs[0] += 16000;
	}

	if (fm.deemph) {
		fm.deemph_a = (int)round(1.0/((1.0-exp(-1.0/(fm.output_rate * 75e-6)))));
	}

	optimal_settings(&fm, 0, 0);
	build_fir(&fm);

	/* Set the tuner gain */
	if (gain == AUTO_GAIN) {
		r = rtlsdr_set_tuner_gain_mode(dev, 0);
	} else {
		r = rtlsdr_set_tuner_gain_mode(dev, 1);
		gain = nearest_gain(gain);
		r = rtlsdr_set_tuner_gain(dev, gain);
	}
	if (r != 0) {
		fprintf(stderr, "WARNING: Failed to set tuner gain.\n");
	} else if (gain == AUTO_GAIN) {
		fprintf(stderr, "Tuner gain set to automatic.\n");
	} else {
		fprintf(stderr, "Tuner gain set to %0.2f dB.\n", gain/10.0);
	}
	r = rtlsdr_set_freq_correction(dev, ppm_error);

	if (strcmp(filename, "-") == 0) { /* Write samples to stdout */
		fm.file = stdout;
#ifdef _WIN32
		_setmode(_fileno(fm.file), _O_BINARY);
#endif
	} else {
		fm.file = fopen(filename, "wb");
		if (!fm.file) {
			fprintf(stderr, "Failed to open %s\n", filename);
			exit(1);
		}
	}

	/* Reset endpoint before we start reading from it (mandatory) */
	r = rtlsdr_reset_buffer(dev);
	if (r < 0) {
		fprintf(stderr, "WARNING: Failed to reset buffers.\n");}

	pthread_create(&demod_thread, NULL, demod_thread_fn, (void *)(&fm));
	/*rtlsdr_read_async(dev, rtlsdr_callback, (void *)(&fm),
			      DEFAULT_ASYNC_BUF_NUMBER,
			      ACTUAL_BUF_LENGTH);*/

	while (!do_exit) {
		sync_read(buffer, ACTUAL_BUF_LENGTH, &fm);
	}

	if (do_exit) {
		fprintf(stderr, "\nUser cancel, exiting...\n");}
	else {
		fprintf(stderr, "\nLibrary error %d, exiting...\n", r);}

	//rtlsdr_cancel_async(dev);
	safe_cond_signal(&data_ready, &data_mutex);
	pthread_join(demod_thread, NULL);

	pthread_cond_destroy(&data_ready);
	pthread_rwlock_destroy(&data_rw);
	pthread_mutex_destroy(&data_mutex);

	if (fm.file != stdout) {
		fclose(fm.file);}

	rtlsdr_close(dev);
	free (buffer);
	return r >= 0 ? r : -r;
}
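The sync_read() in the capture loop is rtl_fm's own helper (defined elsewhere in rtl_fm.c): it performs one blocking USB read and hands the samples to the demodulator. The sketch below shows the general shape of such a wrapper; only rtlsdr_read_sync() and the rtl-sdr.h header are real librtlsdr pieces, while capture_one_buffer and the consume callback are illustrative stand-ins rather than the project's code.

#include <stdint.h>
#include <stdio.h>
#include "rtl-sdr.h"              /* librtlsdr: rtlsdr_dev_t, rtlsdr_read_sync() */

extern rtlsdr_dev_t *dev;         /* the already-opened device, global in rtl_fm.c */

/* Read one buffer of raw I/Q bytes synchronously and pass it on. */
static void capture_one_buffer(uint8_t *buf, uint32_t len,
                               void (*consume)(uint8_t *samples, int n))
{
	int n_read = 0;
	int r = rtlsdr_read_sync(dev, buf, (int)len, &n_read);
	if (r < 0) {
		fprintf(stderr, "WARNING: sync read failed.\n");
		return;
	}
	consume(buf, n_read);     /* e.g. feed the demodulation path */
}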
Example #9
static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
  int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread;
  VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1);
  MB_ROW_COMP *mbri = (MB_ROW_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr2);
  ENTROPY_CONTEXT_PLANES mb_row_left_context;

  while (1) {
    if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded) == 0) break;

    if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0) {
      const int nsync = cpi->mt_sync_range;
      VP8_COMMON *cm = &cpi->common;
      int mb_row;
      MACROBLOCK *x = &mbri->mb;
      MACROBLOCKD *xd = &x->e_mbd;
      TOKENEXTRA *tp;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
      TOKENEXTRA *tp_start = cpi->tok + (1 + ithread) * (16 * 24);
      const int num_part = (1 << cm->multi_token_partition);
#endif

      int *segment_counts = mbri->segment_counts;
      int *totalrate = &mbri->totalrate;

      /* we're shutting down */
      if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded) == 0) break;

      xd->mode_info_context = cm->mi + cm->mode_info_stride * (ithread + 1);
      xd->mode_info_stride = cm->mode_info_stride;

      for (mb_row = ithread + 1; mb_row < cm->mb_rows;
           mb_row += (cpi->encoding_thread_count + 1)) {
        int recon_yoffset, recon_uvoffset;
        int mb_col;
        int ref_fb_idx = cm->lst_fb_idx;
        int dst_fb_idx = cm->new_fb_idx;
        int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
        int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
        int map_index = (mb_row * cm->mb_cols);
        const int *last_row_current_mb_col;
        int *current_mb_col = &cpi->mt_current_mb_col[mb_row];

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
        vp8_writer *w = &cpi->bc[1 + (mb_row % num_part)];
#else
        tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24));
        cpi->tplist[mb_row].start = tp;
#endif

        last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];

        /* reset above block coeffs */
        xd->above_context = cm->above_context;
        xd->left_context = &mb_row_left_context;

        vp8_zero(mb_row_left_context);

        xd->up_available = (mb_row != 0);
        recon_yoffset = (mb_row * recon_y_stride * 16);
        recon_uvoffset = (mb_row * recon_uv_stride * 8);

        /* Set the mb activity pointer to the start of the row. */
        x->mb_activity_ptr = &cpi->mb_activity_map[map_index];

        /* for each macroblock col in image */
        for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
          if (((mb_col - 1) % nsync) == 0) {
            pthread_mutex_t *mutex = &cpi->pmutex[mb_row];
            protected_write(mutex, current_mb_col, mb_col - 1);
          }

          if (mb_row && !(mb_col & (nsync - 1))) {
            pthread_mutex_t *mutex = &cpi->pmutex[mb_row - 1];
            sync_read(mutex, mb_col, last_row_current_mb_col, nsync);
          }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
          tp = tp_start;
#endif

          /* Distance of Mb to the various image edges.
           * These specified to 8th pel as they are always compared
           * to values that are in 1/8th pel units
           */
          xd->mb_to_left_edge = -((mb_col * 16) << 3);
          xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
          xd->mb_to_top_edge = -((mb_row * 16) << 3);
          xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

          /* Set up limit values for motion vectors used to prevent
           * them extending outside the UMV borders
           */
          x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
          x->mv_col_max =
              ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
          x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
          x->mv_row_max =
              ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

          xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
          xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
          xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
          xd->left_available = (mb_col != 0);

          x->rddiv = cpi->RDDIV;
          x->rdmult = cpi->RDMULT;

          /* Copy current mb to a buffer */
          vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

          if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);

          /* Is segmentation enabled */
          /* MB level adjustment to quantizer */
          if (xd->segmentation_enabled) {
            /* Code to set segment id in xd->mbmi.segment_id for
             * current MB (with range checking)
             */
            if (cpi->segmentation_map[map_index + mb_col] <= 3) {
              xd->mode_info_context->mbmi.segment_id =
                  cpi->segmentation_map[map_index + mb_col];
            } else {
              xd->mode_info_context->mbmi.segment_id = 0;
            }

            vp8cx_mb_init_quantizer(cpi, x, 1);
          } else {
            /* Set to Segment 0 by default */
            xd->mode_info_context->mbmi.segment_id = 0;
          }

          x->active_ptr = cpi->active_map + map_index + mb_col;

          if (cm->frame_type == KEY_FRAME) {
            *totalrate += vp8cx_encode_intra_macroblock(cpi, x, &tp);
#ifdef MODE_STATS
            y_modes[xd->mbmi.mode]++;
#endif
          } else {
            *totalrate += vp8cx_encode_inter_macroblock(
                cpi, x, &tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);

#ifdef MODE_STATS
            inter_y_modes[xd->mbmi.mode]++;

            if (xd->mbmi.mode == SPLITMV) {
              int b;

              for (b = 0; b < xd->mbmi.partition_count; ++b) {
                inter_b_modes[x->partition->bmi[b].mode]++;
              }
            }

#endif
            // Keep track of how many (consecutive) times a block
            // is coded as ZEROMV_LASTREF, for base layer frames.
            // Reset to 0 if it's coded as anything else.
            if (cpi->current_layer == 0) {
              if (xd->mode_info_context->mbmi.mode == ZEROMV &&
                  xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
                // Increment, check for wrap-around.
                if (cpi->consec_zero_last[map_index + mb_col] < 255) {
                  cpi->consec_zero_last[map_index + mb_col] += 1;
                }
                if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
                  cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
                }
              } else {
                cpi->consec_zero_last[map_index + mb_col] = 0;
                cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
              }
              if (x->zero_last_dot_suppress) {
                cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
              }
            }

            /* Special case code for cyclic refresh
             * If cyclic update enabled then copy
             * xd->mbmi.segment_id; (which may have been updated
             * based on mode during
             * vp8cx_encode_inter_macroblock()) back into the
             * global segmentation map
             */
            if ((cpi->current_layer == 0) &&
                (cpi->cyclic_refresh_mode_enabled &&
                 xd->segmentation_enabled)) {
              const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
              cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;

              /* If the block has been refreshed mark it as clean
               * (the magnitude of the -ve influences how long it
               * will be before we consider another refresh):
               * Else if it was coded (last frame 0,0) and has
               * not already been refreshed then mark it as a
               * candidate for cleanup next time (marked 0) else
               * mark it as dirty (1).
               */
              if (mbmi->segment_id) {
                cpi->cyclic_refresh_map[map_index + mb_col] = -1;
              } else if ((mbmi->mode == ZEROMV) &&
                         (mbmi->ref_frame == LAST_FRAME)) {
                if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
                  cpi->cyclic_refresh_map[map_index + mb_col] = 0;
                }
              } else {
                cpi->cyclic_refresh_map[map_index + mb_col] = 1;
              }
            }
          }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
          /* pack tokens for this MB */
          {
            int tok_count = tp - tp_start;
            vp8_pack_tokens(w, tp_start, tok_count);
          }
#else
          cpi->tplist[mb_row].stop = tp;
#endif
          /* Increment pointer into gf usage flags structure. */
          x->gf_active_ptr++;

          /* Increment the activity mask pointers. */
          x->mb_activity_ptr++;

          /* adjust to the next column of macroblocks */
          x->src.y_buffer += 16;
          x->src.u_buffer += 8;
          x->src.v_buffer += 8;

          recon_yoffset += 16;
          recon_uvoffset += 8;

          /* Keep track of segment usage */
          segment_counts[xd->mode_info_context->mbmi.segment_id]++;

          /* skip to next mb */
          xd->mode_info_context++;
          x->partition_info++;
          xd->above_context++;
        }

        vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
                          xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

        protected_write(&cpi->pmutex[mb_row], current_mb_col, mb_col + nsync);

        /* this is to account for the border */
        xd->mode_info_context++;
        x->partition_info++;

        x->src.y_buffer +=
            16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
            16 * cm->mb_cols;
        x->src.u_buffer +=
            8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
            8 * cm->mb_cols;
        x->src.v_buffer +=
            8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
            8 * cm->mb_cols;

        xd->mode_info_context +=
            xd->mode_info_stride * cpi->encoding_thread_count;
        x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
        x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
      }
      /* Signal that this thread has completed processing its rows. */
      sem_post(&cpi->h_event_end_encoding[ithread]);
    }
  }

  /* printf("exit thread %d\n", ithread); */
  return 0;
}
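The sync_read()/protected_write() pair in this example implements the VP8 encoder's macroblock-row synchronization: each thread publishes how far it has advanced along its row, and the thread encoding the row below waits until the row above is at least nsync columns ahead. A minimal sketch of helpers with that behaviour is shown below; the _sketch names are illustrative and this is not the libvpx implementation.

#include <pthread.h>
#include <sched.h>

/* Publish the current macroblock column of this row under the row's mutex. */
static void protected_write_sketch(pthread_mutex_t *mutex, int *p, int v) {
  pthread_mutex_lock(mutex);
  *p = v;
  pthread_mutex_unlock(mutex);
}

/* Block until the row above has advanced at least nsync columns past mb_col. */
static void sync_read_sketch(pthread_mutex_t *mutex, int mb_col,
                             const int *last_row_current_mb_col, int nsync) {
  int above_col;
  do {
    pthread_mutex_lock(mutex);
    above_col = *last_row_current_mb_col;
    pthread_mutex_unlock(mutex);
    if (mb_col > above_col - nsync) sched_yield();  /* dependency not met yet */
  } while (mb_col > above_col - nsync);
}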
Example #10
/*
 * Initialize a disk.  In real life we will have to deal with
 * bootstrapping both the template file and root file.
 */
xn_err_t sys_xn_init(void) {
	int new_p;
#ifndef EXOPC
	char block[XN_BLOCK_SIZE];
#endif
	size_t f_blocks, nbytes;
	void *fm;

	si->si_xn_entries = 0;

	XN_DEV = 0;

	if (XN_DEV >= si->si_ndisks) { 
	  printf ("No XN is being configured (XN_DEV %d, si_ndisks %d)\n", 
	      XN_DEV, si->si_ndisks);
	  return -E_NO_XN;
	} else {
	  printf ("Giving XN permissions to disk %d\n", XN_DEV);
        }

	init();

	/* Allocate blocks for the catalogues. */
	fm = db_get_freemap(&nbytes);
	super_block.f_nbytes = nbytes;
	f_blocks = bytes_to_blocks(nbytes);
	printf("free map takes up %d f_blocks %d\n", nbytes, f_blocks);
	f_db = SUPER_BLOCK_DB + 1;
	r_db = f_db + f_blocks;
	t_db = r_db + R_NBLOCKS;
	
	if(db_alloc(SUPER_BLOCK_DB, 1) != SUPER_BLOCK_DB)
		fatal(Could not allocate);
//	bc_check("checking super", SUPER_BLOCK_DB);

	if(db_alloc(f_db, f_blocks) != f_db)
		fatal(Could not allocate);
//	bc_check("checking freemap", f_db);
        if(db_alloc(r_db, R_NBLOCKS) != r_db)
                fatal(Could not allocate);
//	bc_check("checking root catalogue", r_db);
        if(db_alloc(t_db, T_NBLOCKS) != t_db)
                fatal(Could not allocate);
//	bc_check("checking type catalogue", t_db);
	
#ifdef EXOPC
	/* Always redo disk. */
	new_p = 1;
#else
	/* See if we are booting on a new disk. */
	sync_read(block, SUPER_BLOCK_DB, 1);
	memcpy(&super_block, block, sizeof super_block);
	new_p = (super_block.cookie != compute_cookie(&super_block));

	if(!new_p) {
		printf("old disk\n");
		assert(super_block.r_db == r_db);
		assert(super_block.t_db == t_db);
		assert(super_block.f_db == f_db);
		/* tells us if we have to reconstruct. */
		if(super_block.clean)
			printf("clean shutdown\n");
		else {
			printf("unclean shutdown\n");
			/* xn_reconstruct_disk(); */
		}

		/* read in free map. */
		fm = malloc(f_blocks * XN_BLOCK_SIZE);
		assert(fm);
		sync_read(fm, super_block.f_db, f_blocks);
		db_put_freemap(fm, nbytes);
		free(fm);

		root_init(new_p);
	} else
#endif
 {
		printf("new disk\n");
		super_block.r_db = r_db;
		super_block.t_db = t_db;
		super_block.f_db = f_db;
		super_block.f_nbytes = nbytes;
		super_block.clean = 1;
		super_block.cookie = compute_cookie(&super_block);

		/* Create new entry. */
		root_init(new_p); 	
	}
	super_block.clean = 0;
	super_block.cookie = compute_cookie(&super_block);

#ifndef EXOPC
	sync_write(SUPER_BLOCK_DB, &super_block, 1);
	sync_read(block, SUPER_BLOCK_DB, 1);
	assert(compute_cookie(block) == super_block.cookie);
#endif

        return XN_SUCCESS;
}
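In XN, sync_read(buf, db, nblocks) and sync_write(db, buf, nblocks) are blocking block-I/O helpers (note the differing argument order between the two, as used above). As a rough illustration of that contract, with a disk-image file standing in for the raw device, the helpers could be sketched as below; disk_fd, the BLOCK_SIZE value, and the _sketch names are assumptions, not the exokernel code.

#include <assert.h>
#include <sys/types.h>
#include <unistd.h>

#define BLOCK_SIZE 4096          /* stand-in for XN_BLOCK_SIZE */

extern int disk_fd;              /* hypothetical: open fd of a disk image */

/* Blocking read of nblocks disk blocks starting at block number db. */
static void sync_read_sketch(void *buf, unsigned db, size_t nblocks) {
	ssize_t n = pread(disk_fd, buf, nblocks * BLOCK_SIZE, (off_t)db * BLOCK_SIZE);
	assert(n == (ssize_t)(nblocks * BLOCK_SIZE));
}

/* Blocking write of nblocks disk blocks starting at block number db. */
static void sync_write_sketch(unsigned db, const void *buf, size_t nblocks) {
	ssize_t n = pwrite(disk_fd, buf, nblocks * BLOCK_SIZE, (off_t)db * BLOCK_SIZE);
	assert(n == (ssize_t)(nblocks * BLOCK_SIZE));
}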