Example No. 1
int pool_block_get_wait_test(void)
{
	int rv;

	rv = k_mem_pool_alloc(&POOL_ID, &block_list[0], 3000, K_FOREVER);
	if (rv != 0) {
		TC_ERROR("k_mem_pool_alloc(3000) expected %d, got %d\n", 0, rv);
		return TC_FAIL;
	}

	k_sem_give(&ALTERNATE_SEM);    /* Wake alternate_task */
	evidence = 0;
	rv = k_mem_pool_alloc(&POOL_ID, &block_list[1], 128, K_FOREVER);
	if (rv != 0) {
		TC_ERROR("k_mem_pool_alloc(128) expected %d, got %d\n", 0, rv);
		return TC_FAIL;
	}

	switch (evidence) {
	case 0:
		TC_ERROR("k_mem_pool_alloc(128) did not block!\n");
		return TC_FAIL;
	case 1:
		break;
	case 2:
	default:
		TC_ERROR("Rescheduling did not occur "
			 "after k_mem_pool_free()\n");
		return TC_FAIL;
	}

	k_mem_pool_free(&block_list[1]);

	return TC_PASS;
}
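This test (and several of the later ones) allocates from POOL_ID, a memory pool that the test suite defines elsewhere. A minimal sketch of such a definition using the legacy K_MEM_POOL_DEFINE macro is shown below; all sizes are placeholders, not taken from the original suite, chosen only so that the 3000- and 3148-byte requests in these tests can be satisfied.

/* Hypothetical pool: 64-byte minimum blocks, 4096-byte maximum blocks,
 * one maximum-size block, 4-byte alignment. All values are assumptions.
 */
K_MEM_POOL_DEFINE(POOL_ID, 64, 4096, 1, 4);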
Example No. 2
/**
 *
 * @brief Memory pool get/free test
 *
 * @return N/A
 */
void mempool_test(void)
{
	u32_t et; /* elapsed time */
	int i;
	s32_t return_value = 0;
	struct k_mem_block block;

	PRINT_STRING(dashline, output_file);
	et = BENCH_START();
	for (i = 0; i < NR_OF_POOL_RUNS; i++) {
		return_value |= k_mem_pool_alloc(&DEMOPOOL,
						&block,
						16,
						K_FOREVER);
		k_mem_pool_free(&block);
	}
	et = TIME_STAMP_DELTA_GET(et);
	check_result();

	if (return_value != 0) {
		k_panic();
	}
	PRINT_F(output_file, FORMAT,
		"average alloc and dealloc memory pool block",
		SYS_CLOCK_HW_CYCLES_TO_NS_AVG(et, (2 * NR_OF_POOL_RUNS)));
}
Example No. 3
int pool_block_get_w_func(struct k_mem_block *block, struct k_mem_pool *pool,
			  int size, s32_t unused)
{
	ARG_UNUSED(unused);

	return k_mem_pool_alloc(pool, block, size, K_FOREVER);
}
Example No. 4
static int mcux_elcdif_init(struct device *dev)
{
	const struct mcux_elcdif_config *config = dev->config->config_info;
	struct mcux_elcdif_data *data = dev->driver_data;
	int i;

	elcdif_rgb_mode_config_t rgb_mode = config->rgb_mode;

	data->pixel_bytes = config->bits_per_pixel / 8U;
	data->fb_bytes = data->pixel_bytes *
			 rgb_mode.panelWidth * rgb_mode.panelHeight;
	data->write_idx = 1U;

	for (i = 0; i < ARRAY_SIZE(data->fb); i++) {
		if (k_mem_pool_alloc(&mcux_elcdif_pool, &data->fb[i],
				     data->fb_bytes, K_NO_WAIT) != 0) {
			LOG_ERR("Could not allocate frame buffer %d", i);
			return -ENOMEM;
		}
		memset(data->fb[i].data, 0, data->fb_bytes);
	}
	rgb_mode.bufferAddr = (uint32_t) data->fb[0].data;

	k_sem_init(&data->sem, 1, 1);

	config->irq_config_func(dev);

	ELCDIF_RgbModeInit(config->base, &rgb_mode);
	ELCDIF_EnableInterrupts(config->base,
				kELCDIF_CurFrameDoneInterruptEnable);
	ELCDIF_RgbModeStart(config->base);

	return 0;
}
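The driver above allocates every frame buffer with K_NO_WAIT, so mcux_elcdif_pool must be dimensioned up front for ARRAY_SIZE(data->fb) full-size buffers. A hedged sketch of such a definition follows; the numbers are placeholders standing in for values the real driver derives from Kconfig.

/* Placeholder sizing: two 480x272, 16-bit-per-pixel frame buffers
 * (480 * 272 * 2 = 261120 bytes each), rounded up to 256 KiB blocks.
 * Every value here is an assumption, not taken from the driver.
 */
K_MEM_POOL_DEFINE(mcux_elcdif_pool, 0x40000, 0x40000, 2, 32);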
Example No. 5
int pool_block_get_timeout_test(void)
{
	struct k_mem_block block;
	int rv;         /* return value from k_mem_pool_alloc() */
	int j;          /* loop counter */

	for (j = 0; j < 8; j++) {
		rv = pool_block_get_work("k_mem_pool_alloc", pool_block_get_wt_func,
					 getwt_set, ARRAY_SIZE(getwt_set));
		if (rv != TC_PASS) {
			return TC_FAIL;
		}

		free_blocks(getwt_set, ARRAY_SIZE(getwt_set));
	}

	rv = k_mem_pool_alloc(&POOL_ID, &helper_block, 3148, 5);
	if (rv != 0) {
		TC_ERROR("Failed to get size 3148 byte block from POOL_ID\n");
		return TC_FAIL;
	}

	rv = k_mem_pool_alloc(&POOL_ID, &block, 3148, K_NO_WAIT);
	if (rv != -ENOMEM) {
		TC_ERROR("Unexpectedly got size 3148 "
			 "byte block from POOL_ID\n");
		return TC_FAIL;
	}

	k_sem_give(&HELPER_SEM);    /* Activate helper_task */
	rv = k_mem_pool_alloc(&POOL_ID, &block, 3148, 20);
	if (rv != 0) {
		TC_ERROR("Failed to get size 3148 byte block from POOL_ID\n");
		return TC_FAIL;
	}

	rv = k_sem_take(&REGRESS_SEM, K_NO_WAIT);
	if (rv != 0) {
		TC_ERROR("Failed to get size 3148 "
			 "byte block within 20 ticks\n");
		return TC_FAIL;
	}

	k_mem_pool_free(&block);

	return TC_PASS;
}
Example No. 6
/* test cases */
void test_mpool_alloc_merge_failed_diff_size(void)
{
	struct k_mem_block block[BLK_NUM_MIN], block_fail;
	size_t block_size[] = {
		BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
		BLK_SIZE_MID, BLK_SIZE_MID, BLK_SIZE_MID,
		BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
		BLK_SIZE_MID, BLK_SIZE_MID, BLK_SIZE_MID};
	int block_count = ARRAY_SIZE(block_size);

	/**
	 * TESTPOINT: the merging algorithm cannot combine adjacent free blocks
	 * of different sizes
	 * Test steps: 1. allocate 14 blocks in different sizes
	 *             2. free block [2~8], in different sizes
	 *             3. request a big block
	 *                verify blocks [2~8] can't be merged
	 *             4. tear down, free blocks [0, 1, 9~13]
	 */
	for (int i = 0; i < block_count; i++) {
		/* 1. allocate blocks of different sizes */
		zassert_true(k_mem_pool_alloc(&mpool3, &block[i], block_size[i],
			K_NO_WAIT) == 0, NULL);
	}
	/* 2. free blocks [2~8], which have different sizes */
	for (int i = 2; i < 9; i++) {
		k_mem_pool_free(&block[i]);
	}
	/* 3. request a big block; expect the merge to fail */
	k_mem_pool_defrag(&mpool3);
	zassert_true(k_mem_pool_alloc(&mpool3, &block_fail, BLK_SIZE_MAX,
		TIMEOUT) == -EAGAIN, NULL);

	/* 4. test case tear down */
	k_mem_pool_free(&block[0]);
	k_mem_pool_free(&block[1]);
	for (int i = 9; i < block_count; i++) {
		k_mem_pool_free(&block[i]);
	}
}
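The test relies on mpool3 and the BLK_* constants being defined elsewhere. Under the legacy quad-split ("buddy") allocator each level is a factor of four apart, so a configuration consistent with the test could look like the sketch below; every name not used in the test and every value is a placeholder, not taken from the original suite.

#define BLK_SIZE_MIN	16
#define BLK_SIZE_MID	(BLK_SIZE_MIN * 4)	/* 64 bytes  */
#define BLK_SIZE_MAX	(BLK_SIZE_MID * 4)	/* 256 bytes */
#define BLK_NUM_MIN	32	/* minimum-size blocks when fully split */
#define BLK_NUM_MAX	2	/* maximum-size blocks in the pool */
#define BLK_ALIGN	BLK_SIZE_MIN

K_MEM_POOL_DEFINE(mpool3, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);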
Example No. 7
static void tpipe_block_put(struct k_pipe *ppipe, struct k_sem *sema)
{
	struct k_mem_block block;

	for (int i = 0; i < PIPE_LEN; i += BYTES_TO_WRITE) {
		/**TESTPOINT: pipe block put*/
		zassert_equal(k_mem_pool_alloc(&mpool, &block, BYTES_TO_WRITE,
			K_NO_WAIT), 0, NULL);
		memcpy(block.data, &data[i], BYTES_TO_WRITE);
		k_pipe_block_put(ppipe, &block, BYTES_TO_WRITE, sema);
		if (sema) {
			k_sem_take(sema, K_FOREVER);
		}
		k_mem_pool_free(&block);
	}
}
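tpipe_block_put() only exercises the writer side. A hedged sketch of a matching reader, reusing the same ppipe handle and the PIPE_LEN / BYTES_TO_WRITE constants assumed by the writer, could drain the pipe with k_pipe_get():

static void tpipe_get(struct k_pipe *ppipe)
{
	unsigned char rx_data[PIPE_LEN];
	size_t rd_byte = 0;

	for (int i = 0; i < PIPE_LEN; i += BYTES_TO_WRITE) {
		/* read back the chunk written by tpipe_block_put() */
		zassert_equal(k_pipe_get(ppipe, &rx_data[i], BYTES_TO_WRITE,
			&rd_byte, BYTES_TO_WRITE, K_FOREVER), 0, NULL);
		zassert_equal(rd_byte, BYTES_TO_WRITE, NULL);
	}
}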
Example No. 8
int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg)
{
	u8_t ep_abs_idx = EP_ABS_IDX(cfg->ep_addr);
	usb_device_endpoint_init_struct_t epInit;
	struct k_mem_block *block;
	struct usb_ep_ctrl_data *eps;

	/* Validate the endpoint index before touching s_Device.eps[] */
	if (ep_abs_idx >= NUM_OF_EP_MAX) {
		LOG_ERR("Wrong endpoint index/address");
		return -EINVAL;
	}

	eps = &s_Device.eps[ep_abs_idx];

	epInit.zlt = 0U;
	epInit.endpointAddress = cfg->ep_addr;
	epInit.maxPacketSize = cfg->ep_mps;
	epInit.transferType = cfg->ep_type;
	s_Device.eps[ep_abs_idx].ep_type = cfg->ep_type;

	block = &(eps->block);
	if (block->data) {
		k_mem_pool_free(block);
		block->data = NULL;
	}

	if (k_mem_pool_alloc(&ep_buf_pool, block, cfg->ep_mps, 10) == 0) {
		memset(block->data, 0, cfg->ep_mps);
	} else {
		LOG_ERR("Memory allocation time-out");
		return -ENOMEM;
	}

	s_Device.eps[ep_abs_idx].ep_mps = cfg->ep_mps;
	if (s_Device.eps[ep_abs_idx].ep_enabled) {
		s_Device.interface->deviceControl(s_Device.controllerHandle,
						  kUSB_DeviceControlEndpointDeinit,
						  (void *)(&cfg->ep_addr));
	}
	s_Device.interface->deviceControl(s_Device.controllerHandle,
					  kUSB_DeviceControlEndpointInit,
					  &epInit);

	/* If it is a control endpoint, the controller will prime the setup
	 * stage; set the occupied flag here.
	 */
	if ((EP_ADDR2IDX(cfg->ep_addr) == USB_CONTROL_ENDPOINT) &&
	    (EP_ADDR2DIR(cfg->ep_addr) == USB_EP_DIR_OUT)) {
		s_Device.eps[ep_abs_idx].ep_occupied = true;
	}
	s_Device.eps[ep_abs_idx].ep_enabled = true;
	return 0;
}
Example No. 9
void *k_malloc(size_t size)
{
	struct k_mem_block block;

	/*
	 * get a block large enough to hold an initial (hidden) block
	 * descriptor, as well as the space the caller requested
	 */
	size += sizeof(struct k_mem_block);
	if (k_mem_pool_alloc(_HEAP_MEM_POOL, &block, size, K_NO_WAIT) != 0) {
		return NULL;
	}

	/* save the block descriptor info at the start of the actual block */
	memcpy(block.data, &block, sizeof(struct k_mem_block));

	/* return address of the user area part of the block to the caller */
	return (char *)block.data + sizeof(struct k_mem_block);
}
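The matching k_free() has to undo the hidden-descriptor trick: step back over the struct k_mem_block saved at the start of the allocation and hand it to k_mem_pool_free(). A minimal sketch consistent with the layout used by k_malloc() above:

void k_free(void *ptr)
{
	if (ptr != NULL) {
		/* recover the hidden block descriptor saved by k_malloc() */
		ptr = (char *)ptr - sizeof(struct k_mem_block);

		k_mem_pool_free((struct k_mem_block *)ptr);
	}
}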
Example No. 10
int pool_defrag_test(void)
{
	int rv;
	struct k_mem_block new_block;

	/* Get a bunch of blocks */

	rv = pool_block_get_work("k_mem_pool_alloc", pool_block_get_func,
				 defrag, ARRAY_SIZE(defrag));
	if (rv != TC_PASS) {
		return TC_FAIL;
	}


	k_sem_give(&DEFRAG_SEM);    /* Activate defrag_task */

	/*
	 * Block on getting another block from the pool.
	 * This will allow defrag_task to execute so that we can get some
	 * better code coverage.  500 ms is expected to be more than
	 * sufficient time for defrag_task to finish.
	 */

	rv = k_mem_pool_alloc(&POOL_ID, &new_block, DEFRAG_BLK_TEST, 500);
	if (rv != -EAGAIN) {
		TC_ERROR("k_mem_pool_alloc() returned %d, not %d\n", rv,
			 -EAGAIN);
		return TC_FAIL;
	}

	rv = k_sem_take(&REGRESS_SEM, K_NO_WAIT);
	if (rv != 0) {
		TC_ERROR("defrag_task did not finish in allotted time!\n");
		return TC_FAIL;
	}

	/* Free the allocated blocks */

	free_blocks(defrag, ARRAY_SIZE(defrag));

	return TC_PASS;
}
Example No. 11
/* Receive encrypted data from the network. Put that data into a FIFO
 * that will be read by the HTTPS thread.
 */
static void ssl_received(struct net_context *context,
			 struct net_pkt *pkt,
			 int status,
			 void *user_data)
{
	struct http_client_ctx *http_ctx = user_data;
	struct rx_fifo_block *rx_data = NULL;
	struct k_mem_block block;
	int ret;

	ARG_UNUSED(context);
	ARG_UNUSED(status);

	if (pkt && !net_pkt_appdatalen(pkt)) {
		net_pkt_unref(pkt);
		return;
	}

	ret = k_mem_pool_alloc(http_ctx->https.pool, &block,
			       sizeof(struct rx_fifo_block),
			       BUF_ALLOC_TIMEOUT);
	if (ret < 0) {
		if (pkt) {
			net_pkt_unref(pkt);
		}

		return;
	}

	rx_data = block.data;
	rx_data->pkt = pkt;

	/* For freeing memory later */
	memcpy(&rx_data->block, &block, sizeof(struct k_mem_block));

	k_fifo_put(&http_ctx->https.mbedtls.ssl_ctx.rx_fifo, (void *)rx_data);

	/* Let ssl_rx() run */
	k_yield();
}
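On the other end, the HTTPS thread is expected to drain the FIFO, process the packet, and return the pool block through the descriptor that was copied into rx_data->block. A hedged sketch of that consumer step, with the actual TLS processing elided and the surrounding loop left hypothetical:

	struct rx_fifo_block *rx_data =
		k_fifo_get(&http_ctx->https.mbedtls.ssl_ctx.rx_fifo, K_FOREVER);

	if (rx_data) {
		if (rx_data->pkt) {
			/* ... hand rx_data->pkt to the TLS layer here ... */
			net_pkt_unref(rx_data->pkt);
		}

		/* release the pool memory allocated in ssl_received() */
		k_mem_pool_free(&rx_data->block);
	}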
Example No. 12
int http_client_send_req(struct http_client_ctx *ctx,
			 struct http_client_request *req,
			 http_response_cb_t cb,
			 u8_t *response_buf,
			 size_t response_buf_len,
			 void *user_data,
			 s32_t timeout)
{
	int ret;

	if (!response_buf || response_buf_len == 0) {
		return -EINVAL;
	}

	ctx->rsp.response_buf = response_buf;
	ctx->rsp.response_buf_len = response_buf_len;

	client_reset(ctx);

	/* HTTPS connection is established in https_handler() */
	if (!ctx->is_https) {
		ret = tcp_connect(ctx);
		if (ret < 0 && ret != -EALREADY) {
			NET_DBG("TCP connect error (%d)", ret);
			return ret;
		}
	}

	if (!req->host) {
		req->host = ctx->server;
	}

	ctx->req.host = req->host;
	ctx->req.method = req->method;
	ctx->req.user_data = user_data;

	ctx->rsp.cb = cb;

#if defined(CONFIG_HTTPS)
	if (ctx->is_https) {
		struct tx_fifo_block *tx_data;
		struct k_mem_block block;

		ret = start_https(ctx);
		if (ret != 0 && ret != -EALREADY) {
			NET_ERR("HTTPS init failed (%d)", ret);
			goto out;
		}

		ret = k_mem_pool_alloc(ctx->https.pool, &block,
				       sizeof(struct tx_fifo_block),
				       BUF_ALLOC_TIMEOUT);
		if (ret < 0) {
			goto out;
		}

		tx_data = block.data;
		tx_data->req = req;

		memcpy(&tx_data->block, &block, sizeof(struct k_mem_block));

		/* We need to pass the HTTPS request to the HTTPS thread
		 * because of the mbedtls API stack size requirements.
		 */
		k_fifo_put(&ctx->https.mbedtls.ssl_ctx.tx_fifo,
			   (void *)tx_data);

		/* Let https_handler() start processing the message.
		 *
		 * Note that if the timeout is > 0 or K_FOREVER, this yield
		 * is not strictly necessary, as the k_sem_take() below will
		 * let the HTTPS handler thread run. But if the timeout is
		 * K_NO_WAIT, we need to let the HTTPS handler run now.
		 */
		k_yield();
	} else
#endif /* CONFIG_HTTPS */
	{
		print_info(ctx, ctx->req.method);

		ret = http_request(ctx, req, BUF_ALLOC_TIMEOUT);
		if (ret < 0) {
			NET_DBG("Send error (%d)", ret);
			goto out;
		}
	}

	if (timeout != 0 && k_sem_take(&ctx->req.wait, timeout)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (timeout == 0) {
		return -EINPROGRESS;
	}

	return 0;

out:
	tcp_disconnect(ctx);

	return ret;
}
Example No. 13
int pool_block_get_wt_func(struct k_mem_block *block, struct k_mem_pool *pool,
			   int size, s32_t timeout)
{
	return k_mem_pool_alloc(pool, block, size, timeout);
}