Code example #1
/*************************************************
* Allocation                                     *
*************************************************/
void* Pooling_Allocator::allocate(u32bit n)
   {
   const u32bit BITMAP_SIZE = Memory_Block::bitmap_size();
   const u32bit BLOCK_SIZE = Memory_Block::block_size();

   Mutex_Holder lock(mutex);

   if(n <= BITMAP_SIZE * BLOCK_SIZE)
      {
      const u32bit block_no = round_up(n, BLOCK_SIZE) / BLOCK_SIZE;

      byte* mem = allocate_blocks(block_no);
      if(mem)
         return mem;

      get_more_core(PREF_SIZE);

      mem = allocate_blocks(block_no);
      if(mem)
         return mem;

      throw Memory_Exhaustion();
      }

   void* new_buf = alloc_block(n);
   if(new_buf)
      return new_buf;

   throw Memory_Exhaustion();
   }
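A note on the arithmetic above: round_up(n, BLOCK_SIZE) / BLOCK_SIZE is the number of whole blocks needed to hold n bytes. A minimal standalone sketch of that idiom, assuming the usual round-up-to-a-multiple definition (the helper and the BLOCK_SIZE value here are illustrative, not Botan's):

/* Sketch only: round n up to a multiple of align, then count whole blocks. */
#include <assert.h>

static unsigned round_up(unsigned n, unsigned align)
{
    return ((n + align - 1) / align) * align;
}

int main(void)
{
    const unsigned BLOCK_SIZE = 64;                         /* illustrative block size */
    assert(round_up(100, BLOCK_SIZE) / BLOCK_SIZE == 2);    /* 100 bytes -> 2 blocks   */
    assert(round_up(128, BLOCK_SIZE) / BLOCK_SIZE == 2);    /* exact fit stays at 2    */
    return 0;
}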
Code example #2
File: reflect.c Project: Bjarne-Madsen/Firmware
int
reflect_main(int argc, char *argv[])
{
    uint32_t total = 0;
    printf("Starting reflector\n");

    allocate_blocks();

    while (true) {
        char buf[128];
        ssize_t n = read(0, buf, sizeof(buf));
        if (n < 0) {
            break;
        }
        if (n > 0) {
            write(1, buf, n);
        }
        total += n;
        if (total > 1024000) {
            check_blocks();
            total = 0;
        }
    }
    return OK;
}
Code example #3
File: endpoint_tests.c Project: An-mol/grpc
static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
                                              void *data, bool success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;

  if (success) {
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size != 0) {
      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      grpc_endpoint_write(exec_ctx, state->write_ep, &state->outgoing,
                          &state->done_write);
      free(slices);
      return;
    }
  }

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Code example #4
File: endpoint_tests.c Project: aaronjheng/grpc
static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
                                              void *data, grpc_error *error) {
  struct read_and_write_test_state *state = data;
  grpc_slice *slices = NULL;
  size_t nslices;

  if (error == GRPC_ERROR_NONE) {
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size != 0) {
      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      grpc_slice_buffer_reset_and_unref(&state->outgoing);
      grpc_slice_buffer_addn(&state->outgoing, slices, nslices);
      grpc_endpoint_write(exec_ctx, state->write_ep, &state->outgoing,
                          &state->done_write);
      gpr_free(slices);
      return;
    }
  }

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(g_mu);
  state->write_done = 1 + (error == GRPC_ERROR_NONE);
  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL));
  gpr_mu_unlock(g_mu);
}
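Both versions of this handler (examples #3 and #4) finish the transfer the same way: after crediting the bytes just written, the next chunk is clamped to whatever remains of target_bytes, and a zero-sized chunk means the transfer is complete. A standalone sketch of that bookkeeping, with no grpc types and illustrative sizes:

#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t target_bytes = 100000;   /* total bytes to transfer (illustrative)  */
    size_t chunk = 8192;            /* plays the role of current_write_size    */
    size_t written = 0;             /* plays the role of bytes_written         */

    /* Each iteration models one completed write of chunk bytes.
       Assumes target_bytes >= chunk, as in the tests above. */
    while (chunk != 0) {
        written += chunk;
        if (target_bytes - written < chunk)
            chunk = target_bytes - written;   /* clamp the final chunk */
    }
    assert(written == target_bytes);
    return 0;
}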
Code example #5
File: tcp_posix_test.c Project: larsonmpdx/grpc
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(ssize_t num_bytes, ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  ssize_t read_bytes;
  size_t num_blocks;
  gpr_slice *slices;
  int current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_iomgr_closure_init(&write_done_closure, write_done, &state);

  switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) {
    case GRPC_ENDPOINT_DONE:
      /* Write completed immediately */
      read_bytes = drain_socket(sv[0]);
      GPR_ASSERT(read_bytes == num_bytes);
      break;
    case GRPC_ENDPOINT_PENDING:
      drain_socket_blocking(sv[0], num_bytes, num_bytes);
      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
      for (;;) {
        grpc_pollset_worker worker;
        if (state.write_done) {
          break;
        }
        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          deadline);
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "endpoint got error");
      abort();
  }

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
}
Code example #6
File: tcp_posix_test.c Project: royalharsh/grpc
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice *slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO,
          "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("write_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    if (state.write_done) {
      break;
    }
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Code example #7
File: endpoint_tests.c Project: rootusr/grpc
static void shutdown_during_write_test(grpc_endpoint_test_config config,
                                       size_t slice_size) {
    /* test that shutdown with a pending write creates no leaks */
    gpr_timespec deadline;
    size_t size;
    size_t nblocks;
    int current_data = 1;
    shutdown_during_write_test_state read_st;
    shutdown_during_write_test_state write_st;
    gpr_slice *slices;
    grpc_endpoint_test_fixture f =
        begin_test(config, "shutdown_during_write_test", slice_size);

    gpr_log(GPR_INFO, "testing shutdown during a write");

    read_st.ep = f.client_ep;
    write_st.ep = f.server_ep;
    read_st.done = 0;
    write_st.done = 0;

    grpc_endpoint_notify_on_read(
        read_st.ep, shutdown_during_write_test_read_handler, &read_st);
    for (size = 1;; size *= 2) {
        slices = allocate_blocks(size, 1, &nblocks, &current_data);
        switch (grpc_endpoint_write(write_st.ep, slices, nblocks,
                                    shutdown_during_write_test_write_handler,
                                    &write_st)) {
        case GRPC_ENDPOINT_WRITE_DONE:
            break;
        case GRPC_ENDPOINT_WRITE_ERROR:
            gpr_log(GPR_ERROR, "error writing");
            abort();
        case GRPC_ENDPOINT_WRITE_PENDING:
            grpc_endpoint_shutdown(write_st.ep);
            deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
            gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
            while (!write_st.done) {
                GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
                grpc_pollset_work(g_pollset, deadline);
            }
            gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
            grpc_endpoint_destroy(write_st.ep);
            gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
            while (!read_st.done) {
                GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
                grpc_pollset_work(g_pollset, deadline);
            }
            gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
            gpr_free(slices);
            end_test(config);
            return;
        }
        gpr_free(slices);
    }

    gpr_log(GPR_ERROR, "should never reach here");
    abort();
}
Code example #8
File: endpoint_tests.c Project: rootusr/grpc
static void read_and_write_test_write_handler(void *data,
        grpc_endpoint_cb_status error) {
    struct read_and_write_test_state *state = data;
    gpr_slice *slices = NULL;
    size_t nslices;
    grpc_endpoint_write_status write_status;

    GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR);

    gpr_log(GPR_DEBUG, "%s: error=%d", "read_and_write_test_write_handler",
            error);

    if (error == GRPC_ENDPOINT_CB_SHUTDOWN) {
        gpr_log(GPR_INFO, "Write handler shutdown");
        gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
        state->write_done = 1;
        grpc_pollset_kick(g_pollset);
        gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
        return;
    }

    for (;;) {
        /* Need to do inline writes until they don't succeed synchronously or we
           finish writing */
        state->bytes_written += state->current_write_size;
        if (state->target_bytes - state->bytes_written <
                state->current_write_size) {
            state->current_write_size = state->target_bytes - state->bytes_written;
        }
        if (state->current_write_size == 0) {
            break;
        }

        slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                                 &state->current_write_data);
        write_status =
            grpc_endpoint_write(state->write_ep, slices, nslices,
                                read_and_write_test_write_handler, state);
        gpr_log(GPR_DEBUG, "write_status=%d", write_status);
        GPR_ASSERT(write_status != GRPC_ENDPOINT_WRITE_ERROR);
        free(slices);
        if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
            return;
        }
    }
    GPR_ASSERT(state->bytes_written == state->target_bytes);

    gpr_log(GPR_INFO, "Write handler done");
    gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
    state->write_done = 1;
    grpc_pollset_kick(g_pollset);
    gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Code example #9
File: tcp_posix_test.c Project: nkibler/grpc
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  gpr_slice *slices;
  gpr_uint8 current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  for (;;) {
    grpc_pollset_worker worker;
    if (state.write_done) {
      break;
    }
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Code example #10
File: AESFullTest.c Project: aled1027/JustGarble
static void
buildAESCircuit(GarbledCircuit *gc, block *inputLabels)
{
	GarblingContext ctxt;

	int q = 50000; //Just an upper bound
	int r = 50000;
    int *addKeyInputs = calloc(2 * m, sizeof(int));
    int *addKeyOutputs = calloc(m, sizeof(int));
    int *subBytesOutputs = calloc(m, sizeof(int));
    int *shiftRowsOutputs = calloc(m, sizeof(int));
    int *mixColumnOutputs = calloc(m, sizeof(int));
    block *outputMap = allocate_blocks(2 * m);

	createEmptyGarbledCircuit(gc, n, m, q, r, inputLabels);
	startBuilding(gc, &ctxt);

	countToN(addKeyInputs, 256);

	for (int round = 0; round < roundLimit; round++) {

		AddRoundKey(gc, &ctxt, addKeyInputs, addKeyOutputs);

		for (int i = 0; i < 16; i++) {
			SubBytes(gc, &ctxt, addKeyOutputs + 8 * i, subBytesOutputs + 8 * i);
		}

		ShiftRows(gc, &ctxt, subBytesOutputs, shiftRowsOutputs);

		for (int i = 0; i < 4; i++) {
			if (round != roundLimit - 1)
				MixColumns(gc, &ctxt, shiftRowsOutputs + i * 32,
                           mixColumnOutputs + 32 * i);
		}
		for (int i = 0; i < 128; i++) {
			addKeyInputs[i] = mixColumnOutputs[i];
			addKeyInputs[i + 128] = (round + 2) * 128 + i;
		}
	}

	finishBuilding(gc, &ctxt, outputMap, mixColumnOutputs);
	/* writeCircuitToFile(gc, AES_CIRCUIT_FILE_NAME); */

    free(addKeyInputs);
    free(addKeyOutputs);
    free(subBytesOutputs);
    free(shiftRowsOutputs);
    free(mixColumnOutputs);
    free(outputMap);
}
Code example #11
File: indirect.c Project: TeamNyx/system_extras
static struct block_allocation *do_inode_allocate_indirect(
		struct ext4_inode *inode, u32 block_len)
{
	u32 indirect_len = indirect_blocks_needed(block_len);

	struct block_allocation *alloc = allocate_blocks(block_len + indirect_len);

	if (alloc == NULL) {
		error("Failed to allocate %d blocks", block_len + indirect_len);
		return NULL;
	}

	return alloc;
}
Code example #12
File: endpoint_tests.c Project: JoeWoo/grpc
static void read_and_write_test_write_handler(void *data, int success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;
  grpc_endpoint_op_status write_status;

  if (success) {
    for (;;) {
      /* Need to do inline writes until they don't succeed synchronously or we
         finish writing */
      state->bytes_written += state->current_write_size;
      if (state->target_bytes - state->bytes_written <
          state->current_write_size) {
        state->current_write_size = state->target_bytes - state->bytes_written;
      }
      if (state->current_write_size == 0) {
        break;
      }

      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      write_status = grpc_endpoint_write(state->write_ep, &state->outgoing,
                                         &state->done_write);
      free(slices);
      if (write_status == GRPC_ENDPOINT_PENDING) {
        return;
      } else if (write_status == GRPC_ENDPOINT_ERROR) {
        goto cleanup;
      }
    }
    GPR_ASSERT(state->bytes_written == state->target_bytes);
  }

cleanup:
  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Code example #13
File: AESFullTest.c Project: aled1027/JustGarble
int
main(int argc, char *argv[])
{
	GarbledCircuit gc;

    block *outputMap = allocate_blocks(2 * m);
    block *inputLabels = allocate_blocks(2 * n);
    block seed;

	int *timeGarble = calloc(times, sizeof(int));
	int *timeEval = calloc(times, sizeof(int));
	double *timeGarbleMedians = calloc(times, sizeof(double));
	double *timeEvalMedians = calloc(times, sizeof(double));

    unsigned char hash[SHA_DIGEST_LENGTH];

    GarbleType type = GARBLE_TYPE_STANDARD;

    seed = seedRandom(NULL);

    createInputLabels(inputLabels, n);
    buildAESCircuit(&gc, inputLabels);
	/* readCircuitFromFile(&gc, AES_CIRCUIT_FILE_NAME); */
    garbleCircuit(&gc, outputMap, type);
    hashGarbledCircuit(&gc, hash, type);

    {
        block *extractedLabels = allocate_blocks(n);
        block *computedOutputMap = allocate_blocks(m);
        int *inputs = calloc(n, sizeof(int));
        int *outputVals = calloc(m, sizeof(int));
        for (int i = 0; i < n; ++i) {
            inputs[i] = rand() % 2;
        }
        extractLabels(extractedLabels, inputLabels, inputs, gc.n);
        evaluate(&gc, extractedLabels, computedOutputMap, type);
        assert(mapOutputs(outputMap, computedOutputMap, outputVals, m) == SUCCESS);
        {
            GarbledCircuit gc2;

            (void) seedRandom(&seed);
            createInputLabels(inputLabels, n);
            buildAESCircuit(&gc2, inputLabels);
            assert(checkGarbledCircuit(&gc2, hash, type) == SUCCESS);
        }
        free(extractedLabels);
        free(computedOutputMap);
        free(inputs);
        free(outputVals);
    }

	for (int j = 0; j < times; j++) {
		for (int i = 0; i < times; i++) {
			timeGarble[i] = timedGarble(&gc, outputMap, type);
			timeEval[i] = timedEval(&gc, inputLabels, type);
		}
		timeGarbleMedians[j] = ((double) median(timeGarble, times)) / gc.q;
		timeEvalMedians[j] = ((double) median(timeEval, times)) / gc.q;
	}
	double garblingTime = doubleMean(timeGarbleMedians, times);
	double evalTime = doubleMean(timeEvalMedians, times);
	printf("%lf %lf\n", garblingTime, evalTime);

    free(outputMap);
    free(inputLabels);
    free(timeGarble);
    free(timeEval);
    free(timeGarbleMedians);
    free(timeEvalMedians);
	return 0;
}
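In the two JustGarble examples (#10 and #13), allocate_blocks behaves like a malloc for arrays of 128-bit block values, which want 16-byte alignment for SSE code, and the results are released with plain free(). A minimal sketch of an allocator with that interface, assuming only what these call sites require (the real JustGarble implementation may differ):

#include <stdlib.h>
#include <emmintrin.h>   /* __m128i */

typedef __m128i block;

/* Sketch only: returns nblocks 16-byte-aligned, uninitialised blocks,
   or NULL on failure; release with free(), as the examples above do. */
static block *allocate_blocks_sketch(size_t nblocks)
{
    void *p = NULL;
    if (posix_memalign(&p, 16, nblocks * sizeof(block)) != 0)
        return NULL;
    return p;
}

int main(void)
{
    block *labels = allocate_blocks_sketch(2 * 128);   /* e.g. 2 * n label slots */
    if (labels == NULL)
        return 1;
    free(labels);
    return 0;
}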
Code example #14
File: extent.c Project: 543872407/Linux_SourceCode
static struct block_allocation *do_inode_allocate_extents(
	struct ext4_inode *inode, u64 len)
{
	u32 block_len = DIV_ROUND_UP(len, info.block_size);
	struct block_allocation *alloc = allocate_blocks(block_len + 1);
	u32 extent_block = 0;
	u32 file_block = 0;
	struct ext4_extent *extent;
	u64 blocks;

	if (alloc == NULL) {
		error("Failed to allocate %d blocks\n", block_len + 1);
		return NULL;
	}

	int allocation_len = block_allocation_num_regions(alloc);
	if (allocation_len <= 3) {
		reduce_allocation(alloc, 1);
	} else {
		reserve_oob_blocks(alloc, 1);
		extent_block = get_oob_block(alloc, 0);
	}

	if (!extent_block) {
		struct ext4_extent_header *hdr =
			(struct ext4_extent_header *)&inode->i_block[0];
		hdr->eh_magic = EXT4_EXT_MAGIC;
		hdr->eh_entries = allocation_len;
		hdr->eh_max = 3;
		hdr->eh_generation = 0;
		hdr->eh_depth = 0;

		extent = (struct ext4_extent *)&inode->i_block[3];
	} else {
		struct ext4_extent_header *hdr =
			(struct ext4_extent_header *)&inode->i_block[0];
		hdr->eh_magic = EXT4_EXT_MAGIC;
		hdr->eh_entries = 1;
		hdr->eh_max = 3;
		hdr->eh_generation = 0;
		hdr->eh_depth = 1;

		struct ext4_extent_idx *idx =
			(struct ext4_extent_idx *)&inode->i_block[3];
		idx->ei_block = 0;
		idx->ei_leaf_lo = extent_block;
		idx->ei_leaf_hi = 0;
		idx->ei_unused = 0;

		u8 *data = calloc(info.block_size, 1);
		if (!data)
			critical_error_errno("calloc");

		queue_data_block(data, info.block_size, extent_block);

		if (((int)(info.block_size - sizeof(struct ext4_extent_header) /
				sizeof(struct ext4_extent))) < allocation_len) {
			error("File size %llu is too big to fit in a single extent block\n",
					len);
			return NULL;
		}

		hdr = (struct ext4_extent_header *)data;
		hdr->eh_magic = EXT4_EXT_MAGIC;
		hdr->eh_entries = allocation_len;
		hdr->eh_max = (info.block_size - sizeof(struct ext4_extent_header)) /
			sizeof(struct ext4_extent);
		hdr->eh_generation = 0;
		hdr->eh_depth = 0;

		extent = (struct ext4_extent *)(data +
			sizeof(struct ext4_extent_header));
	}

	for (; !last_region(alloc); extent++, get_next_region(alloc)) {
		u32 region_block;
		u32 region_len;

		get_region(alloc, &region_block, &region_len);
		extent->ee_block = file_block;
		extent->ee_len = region_len;
		extent->ee_start_hi = 0;
		extent->ee_start_lo = region_block;
		file_block += region_len;
	}

	if (extent_block)
		block_len += 1;

	blocks = (u64)block_len * info.block_size / 512;

	inode->i_flags |= EXT4_EXTENTS_FL;
	inode->i_size_lo = len;
	inode->i_size_high = len >> 32;
	inode->i_blocks_lo = blocks;
	inode->osd2.linux2.l_i_blocks_high = blocks >> 32;

	rewind_alloc(alloc);

	return alloc;
}