Example #1
0
xn_err_t sys_xn_shutdown(void) {
#ifndef EXOPC
	void *f;
	size_t nbytes, nblocks;
#endif

	/* DRE */
	/* Flush registry */
	try(xr_clean(XN_ALL));
	try(xr_flushall(XN_ALL));

#ifndef EXOPC
	/* Flush free map. */
	f = db_get_freemap(&nbytes);
	printf("nbytes = %d, fnbytes = %d\n", nbytes, super_block.f_nbytes);
	assert(nbytes == super_block.f_nbytes);
	nblocks = bytes_to_blocks(nbytes);
	sync_write(super_block.f_db, f, nblocks);

	/* Do last to make atomic. */
	super_block.clean = 1;
	super_block.cookie = compute_cookie(&super_block);

	sync_write(SUPER_BLOCK_DB, &super_block, 1);

	disk_shutdown();
#endif
	tmplt_shutdown();

	return XN_SUCCESS; 	/* Should reboot. */
}

xn_err_t sys_xn_format(void) {
	fatal(Not implemented);
	return XN_SUCCESS;
}

/* This will be a method. */
xn_err_t 
sys_install_mount(char *name, db_t *db, size_t nelem, xn_elem_t t, cap_t c) {
	xn_elem_t res;

	ensure(write_accessible(db, sizeof *db), XN_CANNOT_ACCESS);

	/* DRE */
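	/* Raise the in-kernel flag for the duration of the root_install call. */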
	xn_in_kernel = 1;
		res = root_install(name, db, nelem, t, c);
	xn_in_kernel = 0;

	sys_return(res);
}
Example #2
0
// Implement row loop filtering for each thread.
static void loop_filter_rows_mt(const YV12_BUFFER_CONFIG *const frame_buffer,
                                VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                int start, int stop, int y_only,
                                VP9LfSync *const lf_sync, int num_lf_workers) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  int r, c;  // SB row and col
  const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;

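  // Each worker filters every num_lf_workers-th superblock row; the
  // sync_read()/sync_write() pair coordinates progress with neighboring rows.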
  for (r = start; r < stop; r += num_lf_workers) {
    const int mi_row = r << MI_BLOCK_SIZE_LOG2;
    MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;

    for (c = 0; c < sb_cols; ++c) {
      const int mi_col = c << MI_BLOCK_SIZE_LOG2;
      LOOP_FILTER_MASK lfm;
      int plane;

      sync_read(lf_sync, r, c);

      vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col);
      vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);

      for (plane = 0; plane < num_planes; ++plane) {
        vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm);
      }

      sync_write(lf_sync, r, c, sb_cols);
    }
  }
}
Example #3
0
static int loop_filter_row_worker(AV1LfSync *const lf_sync,
                                  LFWorkerData *const lf_data) {
  const int num_planes = lf_data->y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols =
      mi_cols_aligned_to_sb(lf_data->cm) >> lf_data->cm->mib_size_log2;
  int mi_row, mi_col;
#if !CONFIG_EXT_PARTITION_TYPES
  enum lf_path path = get_loop_filter_path(lf_data->y_only, lf_data->planes);
#endif  // !CONFIG_EXT_PARTITION_TYPES

#if CONFIG_EXT_PARTITION
  printf(
      "STOPPING: This code has not been modified to work with the "
      "extended coding unit size experiment\n");
  exit(EXIT_FAILURE);
#endif  // CONFIG_EXT_PARTITION

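  // Superblock rows are interleaved across workers; sync_read()/sync_write()
  // order this row's work against its neighbors.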
  for (mi_row = lf_data->start; mi_row < lf_data->stop;
       mi_row += lf_sync->num_workers * lf_data->cm->mib_size) {
    MODE_INFO **const mi =
        lf_data->cm->mi_grid_visible + mi_row * lf_data->cm->mi_stride;

    for (mi_col = 0; mi_col < lf_data->cm->mi_cols;
         mi_col += lf_data->cm->mib_size) {
      const int r = mi_row >> lf_data->cm->mib_size_log2;
      const int c = mi_col >> lf_data->cm->mib_size_log2;
#if !CONFIG_EXT_PARTITION_TYPES
      LOOP_FILTER_MASK lfm;
#endif
      int plane;

      sync_read(lf_sync, r, c);

      av1_setup_dst_planes(lf_data->planes, lf_data->cm->sb_size,
                           lf_data->frame_buffer, mi_row, mi_col);
#if CONFIG_EXT_PARTITION_TYPES
      for (plane = 0; plane < num_planes; ++plane) {
        av1_filter_block_plane_non420_ver(lf_data->cm, &lf_data->planes[plane],
                                          mi + mi_col, mi_row, mi_col, plane);
        av1_filter_block_plane_non420_hor(lf_data->cm, &lf_data->planes[plane],
                                          mi + mi_col, mi_row, mi_col, plane);
      }
#else
      av1_setup_mask(lf_data->cm, mi_row, mi_col, mi + mi_col,
                     lf_data->cm->mi_stride, &lfm);

      for (plane = 0; plane < num_planes; ++plane) {
        loop_filter_block_plane_ver(lf_data->cm, lf_data->planes, plane,
                                    mi + mi_col, mi_row, mi_col, path, &lfm);
        loop_filter_block_plane_hor(lf_data->cm, lf_data->planes, plane,
                                    mi + mi_col, mi_row, mi_col, path, &lfm);
      }
#endif  // CONFIG_EXT_PARTITION_TYPES
      sync_write(lf_sync, r, c, sb_cols);
    }
  }
  return 1;
}
Example #4
0
// Implement row loop filtering for each thread.
static INLINE
void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
                             VP9_COMMON *const cm,
                             struct macroblockd_plane planes[MAX_MB_PLANE],
                             int start, int stop, int y_only,
                             VP9LfSync *const lf_sync) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
  int mi_row, mi_col;
  enum lf_path path;
  if (y_only)
    path = LF_PATH_444;
  else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1)
    path = LF_PATH_420;
  else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0)
    path = LF_PATH_444;
  else
    path = LF_PATH_SLOW;

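  // Superblock rows are assigned round-robin to the loop-filter workers;
  // sync_read()/sync_write() keep adjacent rows in step.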
  for (mi_row = start; mi_row < stop;
       mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) {
    MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;

    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
      const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
      const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
      LOOP_FILTER_MASK lfm;
      int plane;

      sync_read(lf_sync, r, c);

      vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);

      // TODO(JBB): Make setup_mask work for non 420.
      vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
                     &lfm);

      vp9_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
      for (plane = 1; plane < num_planes; ++plane) {
        switch (path) {
          case LF_PATH_420:
            vp9_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
            break;
          case LF_PATH_444:
            vp9_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
            break;
          case LF_PATH_SLOW:
            vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                          mi_row, mi_col);
            break;
        }
      }

      sync_write(lf_sync, r, c, sb_cols);
    }
  }
}
Example #5
0
static int loop_filter_hor_row_worker(AV1LfSync *const lf_sync,
                                      LFWorkerData *const lf_data) {
  const int num_planes = lf_data->y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols =
      mi_cols_aligned_to_sb(lf_data->cm) >> lf_data->cm->mib_size_log2;
  int mi_row, mi_col;
#if !CONFIG_EXT_PARTITION_TYPES
  enum lf_path path = get_loop_filter_path(lf_data->y_only, lf_data->planes);
#endif

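  // This worker performs only the horizontal filtering pass for its rows.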
  for (mi_row = lf_data->start; mi_row < lf_data->stop;
       mi_row += lf_sync->num_workers * lf_data->cm->mib_size) {
    MODE_INFO **const mi =
        lf_data->cm->mi_grid_visible + mi_row * lf_data->cm->mi_stride;

    for (mi_col = 0; mi_col < lf_data->cm->mi_cols;
         mi_col += lf_data->cm->mib_size) {
      const int r = mi_row >> lf_data->cm->mib_size_log2;
      const int c = mi_col >> lf_data->cm->mib_size_log2;
      LOOP_FILTER_MASK lfm;
      int plane;

      // TODO([email protected]): For better parallelization, reorder
      // the outer loop to column-based and remove the synchronizations here.
      sync_read(lf_sync, r, c);

      av1_setup_dst_planes(lf_data->planes, lf_data->cm->sb_size,
                           lf_data->frame_buffer, mi_row, mi_col);
      av1_setup_mask(lf_data->cm, mi_row, mi_col, mi + mi_col,
                     lf_data->cm->mi_stride, &lfm);
#if CONFIG_EXT_PARTITION_TYPES
      for (plane = 0; plane < num_planes; ++plane)
        av1_filter_block_plane_non420_hor(lf_data->cm, &lf_data->planes[plane],
                                          mi + mi_col, mi_row, mi_col, plane);
#else
      for (plane = 0; plane < num_planes; ++plane)
        loop_filter_block_plane_hor(lf_data->cm, lf_data->planes, plane,
                                    mi + mi_col, mi_row, mi_col, path, &lfm);
#endif
      sync_write(lf_sync, r, c, sb_cols);
    }
  }
  return 1;
}
Example #6
0
#include <stdio.h>

/* The sync_* calls below come from a project-specific library; these
 * prototypes are inferred from the call sites and may differ from the
 * real header. */
int sync_init(const char *data_file, int size, const char *log_file);
int sync_read(char *buf, int nbytes, int offset);
int sync_write(const char *buf, int nbytes, int offset);
int sync_exit(void);

int main(void)
{
	int state;
	char ans[11];	/* 10 data bytes plus a terminating '\0' */

	state = sync_init("file.txt", 4096*2, "log.txt");
	if (state != 0)
	{
		printf("sync_init fail\n");
		return -1;
	}

	state = sync_read(ans, 10, 4095);
	if (state != 0)
	{	
		printf("sync_read fail\n");
		return -1;
	}
	ans[10] = '\0';
	printf("read answer: %s\n", ans);

	state = sync_write(ans, 10, 1);
	if (state != 0)
	{	
		printf("sync_write fail\n");
		return -1;
	}
	state = sync_read(ans, 10, 1);
	if (state != 0)
	{	
		printf("sync_read fail\n");
		return -1;
	}
	ans[10] = '\0';
	printf("read answer: %s\n", ans);

	state = sync_exit();
	if (state != 0)
	{	
		printf("sync_exit fail\n");
		return -1;
	}

	return 0;
}
Example #7
0
/*
 * Initialize a disk.  In real life we will have to deal with
 * bootstrapping both the template file and root file.
 */
xn_err_t sys_xn_init(void) {
	int new_p;
#ifndef EXOPC
	char block[XN_BLOCK_SIZE];
#endif
	size_t f_blocks, nbytes;
	void *fm;

	si->si_xn_entries = 0;

	XN_DEV = 0;

	if (XN_DEV >= si->si_ndisks) {
		printf("No XN is being configured (XN_DEV %d, si_ndisks %d)\n",
		       XN_DEV, si->si_ndisks);
		return -E_NO_XN;
	} else {
		printf("Giving XN permissions to disk %d\n", XN_DEV);
	}

	init();

	/* Allocate blocks for the catalogues. */
	fm = db_get_freemap(&nbytes);
	super_block.f_nbytes = nbytes;
	f_blocks = bytes_to_blocks(nbytes);
	printf("free map takes up %d f_blocks %d\n", nbytes, f_blocks);
	f_db = SUPER_BLOCK_DB + 1;
	r_db = f_db + f_blocks;
	t_db = r_db + R_NBLOCKS;
	
	if(db_alloc(SUPER_BLOCK_DB, 1) != SUPER_BLOCK_DB)
		fatal(Could not allocate);
//	bc_check("checking super", SUPER_BLOCK_DB);

	if(db_alloc(f_db, f_blocks) != f_db)
		fatal(Could not allocate);
//	bc_check("checking freemap", f_db);
	if(db_alloc(r_db, R_NBLOCKS) != r_db)
		fatal(Could not allocate);
//	bc_check("checking root catalogue", r_db);
	if(db_alloc(t_db, T_NBLOCKS) != t_db)
		fatal(Could not allocate);
//	bc_check("checking type catalogue", t_db);
	
#ifdef EXOPC
	/* Always redo disk. */
	new_p = 1;
#else
	/* See if we are booting on a new disk. */
	sync_read(block, SUPER_BLOCK_DB, 1);
	memcpy(&super_block, block, sizeof super_block);
	new_p = (super_block.cookie != compute_cookie(&super_block));

	if(!new_p) {
		printf("old disk\n");
		assert(super_block.r_db == r_db);
		assert(super_block.t_db == t_db);
		assert(super_block.f_db == f_db);
		/* tells us if we have to reconstruct. */
		if(super_block.clean)
			printf("clean shutdown\n");
		else {
			printf("unclean shutdown\n");
			/* xn_reconstruct_disk(); */
		}

		/* read in free map. */
		fm = malloc(f_blocks * XN_BLOCK_SIZE);
		assert(fm);
		sync_read(fm, super_block.f_db, f_blocks);
		db_put_freemap(fm, nbytes);
		free(fm);

		root_init(new_p);
	} else
#endif
	{
		printf("new disk\n");
		super_block.r_db = r_db;
		super_block.t_db = t_db;
		super_block.f_db = f_db;
		super_block.f_nbytes = nbytes;
		super_block.clean = 1;
		super_block.cookie = compute_cookie(&super_block);

		/* Create new entry. */
		root_init(new_p); 	
	}
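	/* Mark the disk dirty while XN runs; sys_xn_shutdown() sets clean
	   back to 1 on an orderly shutdown. */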
	super_block.clean = 0;
	super_block.cookie = compute_cookie(&super_block);

#ifndef EXOPC
	sync_write(SUPER_BLOCK_DB, &super_block, 1);
	sync_read(block, SUPER_BLOCK_DB, 1);
	assert(compute_cookie(block) == super_block.cookie);
#endif

	return XN_SUCCESS;
}