Example #1
/*
 * Transfer interrupt handler.
 */
static void qm_dma_isr_handler(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	uint32_t transfer_length;
	dma_cfg_prv_t *chan_cfg;
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	QM_ASSERT(int_reg->status_int_low & QM_DMA_INT_STATUS_TFR);
	QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));

	/* Clear interrupt */
	int_reg->clear_tfr_low = BIT(channel_id);

	/* Mask interrupts for this channel */
	int_reg->mask_tfr_low = BIT(channel_id) << 8;
	int_reg->mask_err_low = BIT(channel_id) << 8;

	/* Call the callback if registered and pass the
	 * transfer length */
	chan_cfg = &dma_channel_config[dma][channel_id];
	if (chan_cfg->client_callback) {
		transfer_length = get_transfer_length(dma, channel_id);
		chan_cfg->client_callback(chan_cfg->callback_context,
					  transfer_length, 0);
	}
}
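In the handler above, writing BIT(channel_id) << 8 relies on the mask registers carrying a per-channel write-enable in bits 8-15 and the new mask value in bits 0-7, so a set write-enable bit with a zero data bit masks the channel. A minimal sketch of the inverse operation, assuming that register convention, unmasks the channel again before a new transfer is started:

/*
 * Sketch (assumption): unmask the transfer and error interrupts for one
 * channel. Bits 8-15 of the mask registers are per-channel write-enables
 * and bits 0-7 hold the new mask value, so writing both bits unmasks the
 * channel, while writing only the write-enable bit (as above) masks it.
 */
static void dma_channel_unmask_sketch(volatile qm_dma_int_reg_t *int_reg,
				      const qm_dma_channel_id_t channel_id)
{
	int_reg->mask_tfr_low = (BIT(channel_id) << 8) | BIT(channel_id);
	int_reg->mask_err_low = (BIT(channel_id) << 8) | BIT(channel_id);
}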
Example #2
static void ac_example_callback(uint32_t status)
{
	/* The analog comparators use level-triggered interrupts, so we will
	 * get a constant stream of interrupts if we do not mask them. Comment
	 * out the following line if you want to get more than one interrupt. */
	QM_SCSS_INT->int_comparators_host_mask |= BIT(0);

	QM_PUTS("Comparator interrupt fired");
	QM_ASSERT(0x1 == status);
}
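Because the callback masks the comparator to stop the level-triggered interrupt from firing continuously, the interrupt stays disabled until the mask bit is cleared again. A minimal re-arm sketch, assuming the same QM_SCSS_INT register layout used above:

static void ac_example_rearm(void)
{
	/* Clear the host mask bit for comparator 0 so its level-triggered
	 * interrupt can fire again (inverse of the masking done in the
	 * callback above). */
	QM_SCSS_INT->int_comparators_host_mask &= ~BIT(0);
}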
Example #3
/*
 * Error interrupt handler.
 */
static void qm_dma_isr_err_handler(const qm_dma_t dma)
{
	uint32_t interrupt_channel_mask;
	dma_cfg_prv_t *chan_cfg;
	qm_dma_channel_id_t channel_id = 0;
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	QM_ASSERT(int_reg->status_int_low & QM_DMA_INT_STATUS_ERR);
	QM_ASSERT(int_reg->status_err_low);

	interrupt_channel_mask = int_reg->status_err_low;
	while (interrupt_channel_mask) {

		/* Find the channel that the interrupt is for */
		if (!(interrupt_channel_mask & 0x1)) {
			interrupt_channel_mask >>= 1;
			channel_id++;
			continue;
		}

		/* Clear the error interrupt for this channel */
		int_reg->clear_err_low = BIT(channel_id);

		/* Mask interrupts for this channel */
		int_reg->mask_block_low = BIT(channel_id) << 8;
		int_reg->mask_tfr_low = BIT(channel_id) << 8;
		int_reg->mask_err_low = BIT(channel_id) << 8;

		/* Call the callback if registered and pass the
		 * transfer error code */
		chan_cfg = &dma_channel_config[dma][channel_id];
		if (chan_cfg->client_callback) {
			chan_cfg->client_callback(chan_cfg->callback_context, 0,
						  -EIO);
		}

		interrupt_channel_mask >>= 1;
		channel_id++;
	}
}
Example #4
int main(void)
{
	unsigned int cnt;

	QM_PUTS("Demonstrating QM_PUTS/QM_PRINTF functionality");

	for (cnt = 0; cnt < 10; cnt++) {
		QM_PRINTF("%d\n", cnt);
	}

	QM_PUTS("Demonstrating QM_ASSERT functionality");

	QM_ASSERT(1 == 0);

	return 0;
}
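The final QM_ASSERT(1 == 0) is meant to fail so the assert path can be observed. The real macro lives in the QMSI common headers; the following is only an illustrative sketch of how such a macro is commonly built, assuming it prints the failing expression and halts in debug builds and compiles away otherwise (this is not the actual QMSI definition):

#if (DEBUG)
#define QM_ASSERT(cond)                                                        \
	do {                                                                   \
		if (!(cond)) {                                                 \
			QM_PRINTF("ASSERT %s:%d %s\n", __FILE__, __LINE__,     \
				  #cond);                                      \
			while (1)                                              \
				;                                              \
		}                                                              \
	} while (0)
#else
#define QM_ASSERT(cond) ((void)0)
#endif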
Example #5
int qm_ss_spi_slave_select(const qm_ss_spi_t spi,
			   const qm_ss_spi_slave_select_t ss)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);

	/* Check if the device reports as busy. */
	/* NOTE: check if QM_ASSERT is the right thing to do here */
	QM_ASSERT(
	    !(__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY));

	uint32_t spien = __builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN);
	spien &= ~QM_SS_SPI_SPIEN_SER_MASK;
	spien |= (ss << QM_SS_SPI_SPIEN_SER_OFFS);
	__builtin_arc_sr(spien, base[spi] + QM_SS_SPI_SPIEN);

	return 0;
}
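A hypothetical call of the function above, assuming the enumerators QM_SS_SPI_0 and QM_SS_SPI_SS_0 exist for the first controller and the first chip-select line (neither name appears in this snippet):

static int select_first_slave(void)
{
	/* Select slave 0 on sensor-subsystem SPI 0 before starting a
	 * transfer; -EINVAL is returned if the controller index is invalid. */
	return qm_ss_spi_slave_select(QM_SS_SPI_0, QM_SS_SPI_SS_0);
}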
Example #6
File: qm_spi.c Project: jeez/qmsi
static void handle_spi_interrupt(const qm_spi_t spi)
{
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const qm_spi_async_transfer_t *transfer = spi_async_transfer[spi];
	const uint32_t int_status = controller->isr;

	QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);
	if (int_status & QM_SPI_ISR_RXOIS) {
		if (transfer->callback) {
			transfer->callback(transfer->callback_data, -EIO,
					   QM_SPI_RX_OVERFLOW, rx_counter[spi]);
		}

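		/* Reading the RXOICR register clears the RX FIFO overflow
		 * interrupt. */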
		controller->rxoicr;
		controller->imr = QM_SPI_IMR_MASK_ALL;
		controller->ssienr = 0;
		return;
	}

	if (int_status & QM_SPI_ISR_RXFIS) {
		handle_rx_interrupt(spi);
	}

	if (transfer->rx_len == rx_counter[spi] &&
	    transfer->tx_len == tx_counter[spi] &&
	    (controller->sr & QM_SPI_SR_TFE) &&
	    !(controller->sr & QM_SPI_SR_BUSY)) {
		controller->imr = QM_SPI_IMR_MASK_ALL;
		controller->ssienr = 0;

		if (transfer->callback && tmode[spi] != QM_SPI_TMOD_RX) {
			transfer->callback(transfer->callback_data, 0,
					   QM_SPI_IDLE, transfer->tx_len);
		}

		return;
	}

	if (int_status & QM_SPI_ISR_TXEIS &&
	    transfer->tx_len > tx_counter[spi]) {
		handle_tx_interrupt(spi);
	}
}
Example #7
/* Public Functions */
int qm_ss_spi_set_config(const qm_ss_spi_t spi,
			 const qm_ss_spi_config_t *const cfg)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(cfg, -EINVAL);
	/* Configuration can be changed only when SPI is disabled */
	/* NOTE: check if QM_ASSERT is the right thing to do here */
	QM_ASSERT((__builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN) &
		   QM_SS_SPI_SPIEN_EN) == 0);

	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	ctrl &= QM_SS_SPI_CTRL_CLK_ENA;
	ctrl |= cfg->frame_size << QM_SS_SPI_CTRL_DFS_OFFS;
	ctrl |= cfg->transfer_mode << QM_SS_SPI_CTRL_TMOD_OFFS;
	ctrl |= cfg->bus_mode << QM_SS_SPI_CTRL_BMOD_OFFS;
	__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);

	__builtin_arc_sr(cfg->clk_divider, base[spi] + QM_SS_SPI_TIMING);

	return 0;
}
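A hypothetical configuration call for the function above. The field names match the cfg accesses in qm_ss_spi_set_config; the enumerator values (QM_SS_SPI_FRAME_SIZE_8_BIT, QM_SS_SPI_TMOD_TX_RX, QM_SS_SPI_BMODE_0) and the divider are illustrative assumptions:

static int configure_ss_spi(void)
{
	qm_ss_spi_config_t cfg = {0};

	cfg.frame_size = QM_SS_SPI_FRAME_SIZE_8_BIT; /* 8-bit data frames. */
	cfg.transfer_mode = QM_SS_SPI_TMOD_TX_RX;    /* Full duplex. */
	cfg.bus_mode = QM_SS_SPI_BMODE_0;            /* CPOL = 0, CPHA = 0. */
	cfg.clk_divider = 32; /* Illustrative divider value. */

	return qm_ss_spi_set_config(QM_SS_SPI_0, &cfg);
}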
Example #8
File: qm_spi.c Project: jeez/qmsi
int qm_spi_dma_channel_config(
    const qm_spi_t spi, const qm_dma_t dma_ctrl_id,
    const qm_dma_channel_id_t dma_channel_id,
    const qm_dma_channel_direction_t dma_channel_direction)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(dma_ctrl_id < QM_DMA_NUM, -EINVAL);
	QM_CHECK(dma_channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);

	dma_context_t *dma_context_p = NULL;
	qm_dma_channel_config_t dma_chan_cfg = {0};
	dma_chan_cfg.handshake_polarity = QM_DMA_HANDSHAKE_POLARITY_HIGH;
	dma_chan_cfg.channel_direction = dma_channel_direction;
	dma_chan_cfg.client_callback = spi_dma_callback;
	dma_chan_cfg.transfer_type = QM_DMA_TYPE_SINGLE;

	/* Every data transfer performed by the DMA core corresponds to an SPI
	 * data frame; the SPI uses the number of bits determined by a previous
	 * qm_spi_set_config call where the frame size was specified. */
	switch (dfs[spi]) {
	case 1:
		dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8;
		break;

	case 2:
		dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_16;
		break;

	case 4:
		dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_32;
		break;

	default:
		/* The DMA core cannot handle 3 byte frame sizes. */
		return -EINVAL;
	}
	dma_chan_cfg.destination_transfer_width =
	    dma_chan_cfg.source_transfer_width;

	switch (dma_channel_direction) {
	case QM_DMA_MEMORY_TO_PERIPHERAL:

#if (QUARK_SE)
		dma_chan_cfg.handshake_interface =
		    (QM_SPI_MST_0 == spi) ? DMA_HW_IF_SPI_MASTER_0_TX
					  : DMA_HW_IF_SPI_MASTER_1_TX;
#else
		dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_TX;
#endif

		/* The DMA burst length has to fit in the space remaining in the
		 * TX FIFO after the watermark level, DMATDLR. */
		dma_chan_cfg.source_burst_length = SPI_DMA_WRITE_BURST_LENGTH;
		dma_chan_cfg.destination_burst_length =
		    SPI_DMA_WRITE_BURST_LENGTH;

		dma_context_p = &dma_context_tx[spi];
		break;

	case QM_DMA_PERIPHERAL_TO_MEMORY:

#if (QUARK_SE)
		dma_chan_cfg.handshake_interface =
		    (QM_SPI_MST_0 == spi) ? DMA_HW_IF_SPI_MASTER_0_RX
					  : DMA_HW_IF_SPI_MASTER_1_RX;
#else
		dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_RX;
#endif
		/* The DMA burst length has to match the value of the receive
		 * watermark level, DMARDLR + 1. */
		dma_chan_cfg.source_burst_length = SPI_DMA_READ_BURST_LENGTH;
		dma_chan_cfg.destination_burst_length =
		    SPI_DMA_READ_BURST_LENGTH;

		dma_context_p = &dma_context_rx[spi];
		break;

	default:
		/* Memory to memory not allowed on SPI transfers. */
		return -EINVAL;
	}

	/* The DMA driver needs a pointer to the client callback function so
	 * that later we can identify which SPI controller the DMA callback
	 * corresponds to, as well as whether we are dealing with a TX or RX
	 * dma_context struct. */
	QM_ASSERT(dma_context_p);
	dma_chan_cfg.callback_context = dma_context_p;

	/* To be used on received DMA callback. */
	dma_context_p->spi_id = spi;
	dma_context_p->dma_channel_id = dma_channel_id;

	/* To be used on transfer setup. */
	dma_core[spi] = dma_ctrl_id;

	return qm_dma_channel_set_config(dma_ctrl_id, dma_channel_id,
					 &dma_chan_cfg);
}
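A usage sketch for the function above, assuming qm_dma_init() and the DMA interrupt registration have already been done (as in the UART example later in this listing) and that QM_DMA_CHANNEL_1 exists for the RX direction:

static int setup_spi_dma_channels(void)
{
	int rc;

	/* One DMA channel per direction: channel 0 feeds the TX FIFO,
	 * channel 1 drains the RX FIFO. */
	rc = qm_spi_dma_channel_config(QM_SPI_MST_0, QM_DMA_0,
				       QM_DMA_CHANNEL_0,
				       QM_DMA_MEMORY_TO_PERIPHERAL);
	if (rc) {
		return rc;
	}

	return qm_spi_dma_channel_config(QM_SPI_MST_0, QM_DMA_0,
					 QM_DMA_CHANNEL_1,
					 QM_DMA_PERIPHERAL_TO_MEMORY);
}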
Example #9
File: qm_spi.c Project: jeez/qmsi
/* DMA driver invoked callback. */
static void spi_dma_callback(void *callback_context, uint32_t len,
			     int error_code)
{
	QM_ASSERT(callback_context);

	int client_error = 0;
	uint32_t frames_expected;
	volatile bool *cb_pending_alternate_p;

	/* The DMA driver returns a pointer to a dma_context struct from which
	 * we find out the corresponding SPI device and transfer direction. */
	dma_context_t *const dma_context_p = callback_context;
	const qm_spi_t spi = dma_context_p->spi_id;
	QM_ASSERT(spi < QM_SPI_NUM);
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
	QM_ASSERT(transfer);
	const uint8_t frame_size = dfs[spi];
	QM_ASSERT((frame_size == 1) || (frame_size == 2) || (frame_size == 4));

	/* The DMA driver returns the length in bytes, but the user expects the
	 * number of frames. */
	const uint32_t frames_transfered = len / frame_size;

	QM_ASSERT((dma_context_p == &dma_context_tx[spi]) ||
		  (dma_context_p == &dma_context_rx[spi]));

	if (dma_context_p == &dma_context_tx[spi]) {
		/* TX transfer. */
		frames_expected = transfer->tx_len;
		cb_pending_alternate_p = &dma_context_rx[spi].cb_pending;
	} else {
		/* RX transfer. */
		frames_expected = transfer->rx_len;
		cb_pending_alternate_p = &dma_context_tx[spi].cb_pending;
	}

	QM_ASSERT(cb_pending_alternate_p);
	QM_ASSERT(dma_context_p->cb_pending);
	dma_context_p->cb_pending = false;

	if (error_code) {
		/* Transfer failed, pass to client the error code returned by
		 * the DMA driver. */
		client_error = error_code;
	} else if (false == *cb_pending_alternate_p) {
		/* TX transfers invoke the callback before the TX data has been
		 * transmitted, so we need to wait here. */
		wait_for_controller(controller);

		if (frames_transfered != frames_expected) {
			QM_ASSERT(frames_transfered < frames_expected);
			/* Callback triggered through a transfer terminate. */
			client_error = -ECANCELED;
		}
	} else {
		/* Controller busy due to alternate DMA channel active. */
		return;
	}

	/* Disable DMA setting and SPI controller. */
	controller->dmacr = 0;
	controller->ssienr = 0;

	if (transfer->callback) {
		transfer->callback(transfer->callback_data, client_error,
				   QM_SPI_IDLE, frames_transfered);
	}
}
Example #10
File: main.c Project: Hlotfy/qmsi
/* Sample UART0 QMSI application. */
int main(void)
{
	qm_uart_config_t cfg = {0};
	qm_uart_status_t uart_status __attribute__((unused)) = 0;
	int ret __attribute__((unused));
	const uint32_t xfer_irq_data = BANNER_IRQ_ID;
	const uint32_t xfer_dma_data = BANNER_DMA_ID;

	/* Set divisors to yield 115200bps baud rate. */
	/* Sysclk is set by boot ROM to hybrid osc in crystal mode (32MHz),
	 * peripheral clock divisor set to 1.
	 */
	cfg.baud_divisor = QM_UART_CFG_BAUD_DL_PACK(0, 17, 6);
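	/*
	 * Worked check (assumption: standard 16550-style formula
	 * baud = clk / (16 * divisor), with DLF as a 1/16 fractional part):
	 * divisor = ((0 << 8) | 17) + 6 / 16 = 17.375, so
	 * 32 MHz / (16 * 17.375) is roughly 115108 bps, within 0.1% of 115200.
	 */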

	cfg.line_control = QM_UART_LC_8N1;
	cfg.hw_fc = false;

/* Mux out STDOUT_UART tx/rx pins and enable input for rx. */
#if (QUARK_SE)
	if (STDOUT_UART == QM_UART_0) {
		qm_pmux_select(QM_PIN_ID_18, QM_PMUX_FN_0);
		qm_pmux_select(QM_PIN_ID_19, QM_PMUX_FN_0);
		qm_pmux_input_en(QM_PIN_ID_18, true);
	} else {
		qm_pmux_select(QM_PIN_ID_16, QM_PMUX_FN_2);
		qm_pmux_select(QM_PIN_ID_17, QM_PMUX_FN_2);
		qm_pmux_input_en(QM_PIN_ID_17, true);
	}

#elif(QUARK_D2000)
	if (STDOUT_UART == QM_UART_0) {
		qm_pmux_select(QM_PIN_ID_12, QM_PMUX_FN_2);
		qm_pmux_select(QM_PIN_ID_13, QM_PMUX_FN_2);
		qm_pmux_input_en(QM_PIN_ID_13, true);
	} else {
		qm_pmux_select(QM_PIN_ID_20, QM_PMUX_FN_2);
		qm_pmux_select(QM_PIN_ID_21, QM_PMUX_FN_2);
		qm_pmux_input_en(QM_PIN_ID_21, true);
	}

#else
#error("Unsupported / unspecified processor detected.")
#endif

	clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_UARTA_REGISTER);
	qm_uart_set_config(STDOUT_UART, &cfg);

	QM_PRINTF("Starting: UART\n");

	/* Synchronous TX. */
	ret = qm_uart_write_buffer(STDOUT_UART, (uint8_t *)BANNER_STR,
				   sizeof(BANNER_STR));
	QM_ASSERT(0 == ret);

/* Register the UART interrupts. */
#if (STDOUT_UART_0)
	qm_irq_request(QM_IRQ_UART_0, qm_uart_0_isr);
#elif(STDOUT_UART_1)
	qm_irq_request(QM_IRQ_UART_1, qm_uart_1_isr);
#endif

	/* Used on both TX and RX. */
	async_xfer_desc.callback_data = (void *)&xfer_irq_data;

	/* IRQ based TX. */
	async_xfer_desc.data = (uint8_t *)BANNER_IRQ;
	async_xfer_desc.data_len = sizeof(BANNER_IRQ);
	async_xfer_desc.callback = uart_example_tx_callback;
	ret = qm_uart_irq_write(STDOUT_UART, &async_xfer_desc);
	QM_ASSERT(0 == ret);

	clk_sys_udelay(WAIT_1SEC);

	/* IRQ based RX. */
	rx_callback_invoked = false;

	async_xfer_desc.data = rx_buffer;
	async_xfer_desc.data_len = BIG_NUMBER_RX;
	async_xfer_desc.callback = uart_example_rx_callback;
	ret = qm_uart_irq_read(STDOUT_UART, &async_xfer_desc);
	QM_ASSERT(0 == ret);
	QM_PRINTF("\nWaiting for you to type %d characters... ?\n",
		  BIG_NUMBER_RX);

	wait_rx_callback_timeout(TIMEOUT_10SEC);

	if (!rx_callback_invoked) {
		/* The RX complete callback was not invoked, so we need to
		 * terminate the transfer in order to grab whatever is
		 * available in the RX buffer. */
		ret = qm_uart_irq_read_terminate(STDOUT_UART);
		QM_ASSERT(0 == ret);
	} else {
		/* The RX complete callback was invoked and the RX buffer was
		 * read; we wait in case the user does not stop typing after
		 * entering the exact amount of data that fits the RX buffer,
		 * i.e. there may be additional bytes in the RX FIFO that need
		 * to be read before continuing. */
		clk_sys_udelay(WAIT_5SEC);

		qm_uart_get_status(STDOUT_UART, &uart_status);
		if (QM_UART_RX_BUSY & uart_status) {
			/* There is some data in the RX FIFO, let's fetch it. */
			ret = qm_uart_irq_read(STDOUT_UART, &async_xfer_desc);
			QM_ASSERT(0 == ret);

			ret = qm_uart_irq_read_terminate(STDOUT_UART);
			QM_ASSERT(0 == ret);
		}
	}

	/* Register the DMA interrupts. */
	qm_irq_request(QM_IRQ_DMA_0, qm_dma_0_isr_0);
	qm_irq_request(QM_IRQ_DMA_ERR, qm_dma_0_isr_err);

	/* DMA controller initialization. */
	ret = qm_dma_init(QM_DMA_0);
	QM_ASSERT(0 == ret);

	/* Used on both TX and RX. */
	async_xfer_desc.callback_data = (void *)&xfer_dma_data;

	/* DMA based TX. */
	ret =
	    qm_uart_dma_channel_config(STDOUT_UART, QM_DMA_0, QM_DMA_CHANNEL_0,
				       QM_DMA_MEMORY_TO_PERIPHERAL);
	QM_ASSERT(0 == ret);

	async_xfer_desc.data = (uint8_t *)BANNER_DMA;
	async_xfer_desc.data_len = sizeof(BANNER_DMA);
	async_xfer_desc.callback = uart_example_tx_callback;
	ret = qm_uart_dma_write(STDOUT_UART, &async_xfer_desc);
	QM_ASSERT(0 == ret);

	clk_sys_udelay(WAIT_1SEC);

	/* DMA based RX. */
	rx_callback_invoked = false;

	ret =
	    qm_uart_dma_channel_config(STDOUT_UART, QM_DMA_0, QM_DMA_CHANNEL_0,
				       QM_DMA_PERIPHERAL_TO_MEMORY);
	QM_ASSERT(0 == ret);

	QM_PUTS("Waiting for data on STDOUT_UART (DMA mode) ...");
	async_xfer_desc.data = (uint8_t *)rx_buffer;
	async_xfer_desc.data_len = BIG_NUMBER_RX;
	async_xfer_desc.callback = uart_example_rx_callback;
	ret = qm_uart_dma_read(STDOUT_UART, &async_xfer_desc);
	QM_ASSERT(0 == ret);

	wait_rx_callback_timeout(TIMEOUT_10SEC);

	if (!rx_callback_invoked) {
		/* The RX complete callback was not invoked, so we need to
		 * terminate the transfer in order to grab whatever was written
		 * to the RX buffer. */
		ret = qm_uart_dma_read_terminate(STDOUT_UART);
		QM_ASSERT(0 == ret);
	}

	QM_PRINTF("\nFinished: UART\n");
	return 0;
}
Example #11
/*
 * Transfer interrupt handler.
 * - Single block: TFR triggers a user callback invocation
 * - Multiblock (contiguous): TFR triggers a user callback invocation; block
 *   interrupts are silent
 * - Multiblock (linked list): the last block interrupt on each buffer triggers
 *   a user callback invocation; TFR is silent
 */
static void qm_dma_isr_handler(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];
	uint32_t transfer_length =
	    get_transfer_length(dma, channel_id, prv_cfg);

	/* The status can't be asserted here as there is a possible race
	 * condition when terminating channels. It's possible that an interrupt
	 * can be generated before the terminate function masks the
	 * interrupts. */

	if (int_reg->status_int_low & QM_DMA_INT_STATUS_TFR) {

		QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));

		/* Transfer completed, clear interrupt */
		int_reg->clear_tfr_low = BIT(channel_id);

		/* If multiblock, the final block is also completed. */
		int_reg->clear_block_low = BIT(channel_id);

		/* Mask interrupts for this channel */
		int_reg->mask_block_low = BIT(channel_id) << 8;
		int_reg->mask_tfr_low = BIT(channel_id) << 8;
		int_reg->mask_err_low = BIT(channel_id) << 8;

		/* Clear llp register */
		chan_reg->llp_low = 0;

		/*
		 * Call the callback if registered and pass the transfer length.
		 */
		if (prv_cfg->client_callback) {
			/* Single block or contiguous multiblock. */
			prv_cfg->client_callback(prv_cfg->callback_context,
						 transfer_length, 0);
		}
	} else if (int_reg->status_int_low & QM_DMA_INT_STATUS_BLOCK) {
		/* Block interrupts are only unmasked in multiblock mode. */
		QM_ASSERT(int_reg->status_block_low & BIT(channel_id));

		/* Block completed, clear interrupt. */
		int_reg->clear_block_low = BIT(channel_id);

		prv_cfg->num_blocks_int_pending--;

		if (NULL != prv_cfg->lli_tail &&
		    0 == prv_cfg->num_blocks_int_pending) {
			/*
			 * Linked list mode: invoke the callback if this is the
			 * last block of the buffer.
			 */
			if (prv_cfg->client_callback) {
				prv_cfg->client_callback(
				    prv_cfg->callback_context, transfer_length,
				    0);
			}

			/* Buffer done, set for next buffer. */
			prv_cfg->num_blocks_int_pending =
			    prv_cfg->num_blocks_per_buffer;

		} else if (NULL == prv_cfg->lli_tail) {
			QM_ASSERT(prv_cfg->num_blocks_int_pending <
				  prv_cfg->num_blocks_per_buffer);
			if (1 == prv_cfg->num_blocks_int_pending) {
				/*
				 * Contiguous mode. We have just processed the
				 * next-to-last block; clear CFG.RELOAD so
				 * that the next block is the last one to be
				 * transferred.
				 */
				chan_reg->cfg_low &=
				    ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
				chan_reg->cfg_low &=
				    ~QM_DMA_CFG_L_RELOAD_DST_MASK;
			}
		}
	}
}