/**
 *
 *	Computes binomial coefficients and stores
 *	them in a table. Only (n choose k) for k > 3
 *	is cached; for k <= 3 it is faster to compute
 *	the value directly. The table is filled using the
 *	Pascal's triangle recurrence
 *	C(n, k) = C(n-1, k-1) + C(n-1, k).
 *
 *	\return
 *	void
 *
 */
static void compute_binom_coeffs(
	struct tm_subsets_cache *tm_subsets_cache)
{
	uint32_t n = 0;
	uint32_t k = 0;
	struct dal_context *dal_context = tm_subsets_cache->dal_context;

	/* shouldn't happen */
	if (tm_subsets_cache->binom_coeffs == NULL) {
		TM_ERROR("%s: binom_coeffs is NULL\n", __func__);
		return;
	}
	for (n = 4; n <=
		tm_subsets_cache->num_display_paths; ++n) {

		int offset = (n-4)*
			(tm_subsets_cache->max_num_cofunc_targets - 3);

		for (k = 4; k <=
			tm_subsets_cache->max_num_cofunc_targets; ++k) {
			if (n == k) {
				tm_subsets_cache->binom_coeffs[offset+k-4] = 1;
				break;
			}
			/* compute recursively, if cached, it would
			 * have been computed in the previous n-loop
			 */
			tm_subsets_cache->binom_coeffs[offset+k-4] =
				get_binom_coeff(tm_subsets_cache, n-1, k-1) +
				get_binom_coeff(tm_subsets_cache, n-1, k);
		}
	}
}
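/*
 * For illustration, a minimal sketch of what the get_binom_coeff() helper
 * used above might look like, assuming the table layout implied by
 * compute_binom_coeffs(): rows start at n == 4, columns at k == 4, and
 * values for k <= 3 are computed directly. This is an assumption drawn
 * from the code above, not the actual DAL helper.
 */
static uint32_t get_binom_coeff_sketch(
	struct tm_subsets_cache *tm_subsets_cache,
	uint32_t n,
	uint32_t k)
{
	if (k > n)
		return 0;
	if (k == 0 || k == n)
		return 1;
	/* small k: computing directly is faster than a table lookup */
	if (k == 1)
		return n;
	if (k == 2)
		return (n * (n - 1)) / 2;
	if (k == 3)
		return (n * (n - 1) * (n - 2)) / 6;
	/* cached entries: row n starts at (n-4)*(max_num_cofunc_targets-3) */
	return tm_subsets_cache->binom_coeffs[
		(n - 4) * (tm_subsets_cache->max_num_cofunc_targets - 3) +
		(k - 4)];
}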
/* Example #2 */
int touchmouse_set_device_mode(touchmouse_device *dev, touchmouse_mode mode)
{
	// We need to set two bits in a particular Feature report.  We first fetch
	// the current state of the feature report, set the interesting bits, and
	// write that feature report back to the device.
	TM_SPEW("touchmouse_set_device_mode: Reading current config flags\n");
	unsigned char data[27] = {0x22};
	int transferred = 0;
	transferred = hid_get_feature_report(dev->dev, data, 27);
	if (transferred > 0) {
		TM_SPEW("%d bytes received:\n", transferred);
		int i;
		for(i = 0; i < transferred; i++) {
			TM_SPEW("%02X ", data[i]);
		}
		TM_SPEW("\n");
	}
	if (transferred != 0x1B) {
		TM_ERROR("touchmouse_set_device_mode: Failed to read Feature 0x22 correctly; expected 27 bytes, got %d\n", transferred);
		return -1;
	}

	// This particular byte/setting appears to control the
	// "send all the raw input" flag.
	switch (mode) {
		case TOUCHMOUSE_DEFAULT:
			data[4] = 0x00;
			TM_DEBUG("touchmouse_set_device_mode: Trying to disable full touch updates...\n");
			break;
		case TOUCHMOUSE_RAW_IMAGE:
			data[4] = 0x06;
			TM_DEBUG("touchmouse_set_device_mode: Trying to enable full touch updates...\n");
			break;
	}

	transferred = hid_send_feature_report(dev->dev, data, 27);
	TM_SPEW("Wrote %d bytes\n", transferred);
	if (transferred == 0x1B) {
		TM_DEBUG("touchmouse_set_device_mode: Successfully set device mode.\n");
		return 0;
	}
	TM_ERROR("touchmouse_set_device_mode: Failed to set device mode.\n");
	return -1;
}
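/*
 * The read-modify-write pattern used above, reduced to its hidapi
 * essentials as a sketch (assumes hidapi.h is included). It assumes
 * report ID 0x22 and a 27-byte report, as in touchmouse_set_device_mode();
 * 'handle' is any open hid_device*, and the byte/value chosen here is
 * purely illustrative.
 */
static int set_feature_bits_sketch(hid_device *handle)
{
	unsigned char buf[27] = {0x22}; /* first byte selects the report ID */

	/* fetch the current state of the feature report */
	if (hid_get_feature_report(handle, buf, sizeof(buf)) != 27)
		return -1;
	/* flip only the bits of interest, leaving the rest untouched */
	buf[4] = 0x06;
	/* write the whole report back to the device */
	if (hid_send_feature_report(handle, buf, sizeof(buf)) != 27)
		return -1;
	return 0;
}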
/* Example #3 */
int touchmouse_open(touchmouse_device **dev, touchmouse_device_info *dev_info)
{
	touchmouse_device* t_dev = (touchmouse_device*)malloc(sizeof(touchmouse_device));
	if (!t_dev) {
		TM_ERROR("malloc() failed allocating touchmouse_device\n");
		return -1;
	}
	memset(t_dev, 0, sizeof(touchmouse_device));
	char* path = ((struct hid_device_info**)dev_info->opaque)[0]->path;
	t_dev->dev = hid_open_path(path);
	if (!t_dev->dev) {
		TM_ERROR("hid_open_path() failed for device with path %s\n", path);
		free(t_dev);
		return -1;
	}
	hid_set_nonblocking(t_dev->dev, 1); // Enable nonblocking reads
	*dev = t_dev;
	return 0;
}
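/*
 * Usage sketch: opening a device and switching it to raw-image mode with
 * the two functions above. 'info' is assumed to come from the library's
 * device enumeration; the cleanup on the error path reuses what
 * touchmouse_open() itself does (hid_close() plus free()) and is shown
 * only for illustration.
 */
static touchmouse_device *open_in_raw_mode_sketch(touchmouse_device_info *info)
{
	touchmouse_device *dev = NULL;

	if (touchmouse_open(&dev, info) != 0)
		return NULL;
	if (touchmouse_set_device_mode(dev, TOUCHMOUSE_RAW_IMAGE) != 0) {
		hid_close(dev->dev);
		free(dev);
		return NULL;
	}
	return dev;
}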
/**
*	Check whether the given subset of display
*	paths is supported, i.e. if the display paths
*	can be enabled at the same time.
*
*	\param [in] displays: array of display paths for
*	which we will check whether they can be
*	enabled at the same time
*	\param [in] array_size: size of the above array
*
*	\return
*	CacheQueryResult enum:
*	Supported - the given subset is supported (cache hit)
*	NotSupported - the given subset is not supported (cache hit)
*
*	Unknown - this display path subset is currently mapped
*	in the cache, but this is
*	the first query so it is not known whether
*	it's supported or not.
*	The caller must do a noncached lookup
*	and update the cache via
*	SetSubsetSupported() (cache miss)
*
*	DPMappingNotValid - this display path subset is currently
*	not being cached. The caller must
*	do a noncached lookup and not
*	attempt to update cache, since it will
*	fail (cache miss)
*
*/
enum cache_query_result dal_is_subset_supported(
	struct tm_subsets_cache *tm_subsets_cache,
	const uint32_t *displays,
	uint32_t array_size)
{
	uint32_t index;
	uint32_t word_num;
	uint32_t bit_mask;
	uint32_t ret;
	struct dal_context *dal_context = tm_subsets_cache->dal_context;

	ASSERT(displays != NULL);

	if (tm_subsets_cache->cofunc_cache == NULL ||
		displays == NULL) {
		ASSERT_CRITICAL(0);
		return CQR_DP_MAPPING_NOT_VALID;
	}

	if (array_size == 1) {

		ASSERT(displays[0] < tm_subsets_cache->num_display_paths);

		/* mapping always valid for size == 1 */
		if (!tm_utils_test_bit(
			&tm_subsets_cache->cofunc_cache_single_valid,
			displays[0]))
			return CQR_UNKNOWN;

		/* the _valid bit says the result is cached; this
		 * companion bit holds the result itself
		 */
		if (tm_utils_test_bit(
			&tm_subsets_cache->cofunc_cache_single,
			displays[0]))
			return CQR_SUPPORTED;
		else
			return CQR_NOT_SUPPORTED;

	}

	/* check if this is a query for all connected
	 * (enabled) ones, which is the most common query observed
	 */
	if (array_size <= tm_subsets_cache->num_connected &&
		array_size <= tm_subsets_cache->max_num_cofunc_targets &&
		tm_subsets_cache->all_connected_supported != CQR_UNKNOWN) {

		if (all_connected(tm_subsets_cache, displays, array_size)) {
			if (tm_subsets_cache->all_connected_supported ==
				CQR_SUPPORTED)
				return CQR_SUPPORTED;
			/* if all connected are not supported, and the subset
			 * is smaller, it could be that it's supported,
			 * in that case we don't return here
			 */
			else if (array_size ==
				tm_subsets_cache->num_connected)
				return CQR_NOT_SUPPORTED;
		}
	}

	/* array_size > 1: asking about a disconnected path?
	 * The caller should do a noncached lookup and return the
	 * result, but not update the cache.
	 */
	if (!is_dp_mapping_valid(tm_subsets_cache, displays, array_size))
		return CQR_DP_MAPPING_NOT_VALID;

	index = find_index(tm_subsets_cache, displays, array_size);
	if (index > tm_subsets_cache->max_num_combinations) {

		if (array_size > tm_subsets_cache->max_num_cofunc_targets)
			return CQR_NOT_SUPPORTED;

		/* this should not happen, fall back
		 * to noncached lookup without updating cache
		 */
		TM_ERROR("%s: Invalid index", __func__);
		return CQR_DP_MAPPING_NOT_VALID;
	}

	/* If we have index K, we want to read
	 * bits 2K and 2K+1 in the cache.
	 * Since the cache is internally represented as a
	 * uint32_t array, we first convert this into bytes.
	 * One element has sizeof(uint32_t)*8 bits, so the 2K'th bit is
	 * contained in the array element at location
	 * word_num = (2K) / (sizeof(uint32_t)*8) = K / (sizeof(uint32_t)*4).
	 * bit_mask is the offset within
	 * tm_subsets_cache->cofunc_cache[word_num] - it's the
	 * remainder of the above division, multiplied by 2.
	 * Note that the 2 bits directly encode the
	 * enum: 0x0 = Unknown, 0x1 = NotSupported, 0x2 = Supported...
	 * I.e. it's not that one bit marks valid/invalid and the
	 * other supported/not supported.
	 */

	/* *4 instead of *8 since every subset uses 2 bits*/
	word_num = index / (sizeof(uint32_t)*4);
	bit_mask = 0x3 << ((index % (sizeof(uint32_t)*4)) * 2);

	ret = (*(tm_subsets_cache->cofunc_cache +
		word_num) & bit_mask) >> ((index % (sizeof(uint32_t)*4)) * 2);
	return (enum cache_query_result)(ret);
}
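/*
 * The 2-bit packing arithmetic described above, isolated into a sketch of
 * a getter/setter pair over a uint32_t array. Each index K owns bits 2K
 * and 2K+1, so sizeof(uint32_t)*4 entries fit in one word. These helpers
 * are illustrative; the DAL code computes the same quantities inline.
 */
static uint32_t get_2bit_entry_sketch(const uint32_t *cache, uint32_t index)
{
	uint32_t word_num = index / (sizeof(uint32_t) * 4);
	uint32_t shift = (index % (sizeof(uint32_t) * 4)) * 2;

	return (cache[word_num] >> shift) & 0x3;
}

static void set_2bit_entry_sketch(uint32_t *cache, uint32_t index,
	uint32_t value)
{
	uint32_t word_num = index / (sizeof(uint32_t) * 4);
	uint32_t shift = (index % (sizeof(uint32_t) * 4)) * 2;

	cache[word_num] = (cache[word_num] & ~(0x3u << shift)) |
		((value & 0x3u) << shift);
}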
/**
*
* Since we keep only connected displays, if connectivity
* information changes, we need to update the cache-to-DP
* mapping and possibly clear the cache.
*
* \param [in] display_index: display index of the path whose
*	connectivity information (may have) changed
*
* \param [in] connected: is the display path with the
* given index connected or disconnected
*
*
*/
void dal_update_display_mapping(
	struct tm_subsets_cache *tm_subsets_cache,
	const uint32_t display_index,
	bool connected)
{
	struct dal_context *dal_context = tm_subsets_cache->dal_context;
	uint32_t cache_size_in_bytes;
	uint32_t i;

	if (tm_subsets_cache->cofunc_cache == NULL ||
		display_index >= tm_subsets_cache->num_display_paths) {
		TM_ERROR("%s: cofunctional cache is NULL or display index invalid\n",
			__func__);
		return;
	}

	if (connected != tm_utils_test_bit(
		&tm_subsets_cache->connected, display_index)) {

		if (connected) {
			tm_utils_set_bit(
				&tm_subsets_cache->connected,
				display_index);
			++tm_subsets_cache->num_connected;
		} else {
			tm_utils_clear_bit(
				&tm_subsets_cache->connected,
				display_index);
			--tm_subsets_cache->num_connected;
		}
	} else
		/* cache is already up-to-date*/
		return;


	/* Need to grow the cache. Unfortunately there's no good
	 * way to keep the previous cached lookups, so we have to
	 * wipe everything and take cache misses (noncached lookups)
	 * until the cache refills. On disconnect we don't shrink
	 * the cache.
	 */
	if (tm_subsets_cache->num_connected >
		tm_subsets_cache->num_cur_cached_paths) {

		/* we keep it in sync*/
		if (tm_subsets_cache->num_connected !=
			tm_subsets_cache->num_cur_cached_paths + 1)
			TM_WARNING("%s: Subset cache not in sync\n", __func__);

		++tm_subsets_cache->num_cur_cached_paths;

		dal_free(tm_subsets_cache->cofunc_cache);
		tm_subsets_cache->cofunc_cache = NULL;

		tm_subsets_cache->max_num_combinations =
			dal_get_num_of_combinations(tm_subsets_cache);

		/* need 2 bits per combination, also need to
		 * align to the size of uint32_t.
		 * E.g. 53 combinations require 106 bits,
		 * i.e. 106/8 = 13.25 bytes, which integer
		 * division rounds down to 13, so we add 1 to get 14;
		 * but because we store uint32_t words, the formula
		 * below rounds up to 16 bytes.
		 */
		cache_size_in_bytes = sizeof(uint32_t) *
			(1 + tm_subsets_cache->max_num_combinations/
			(4 * sizeof(uint32_t)));
		/* dal_alloc also zeros the cache */
		tm_subsets_cache->cofunc_cache = dal_alloc(cache_size_in_bytes);
		if (tm_subsets_cache->cofunc_cache == NULL) {
			TM_ERROR("%s: failed to allocate cofunctional cache\n",
				__func__);
			return;
		}
	}

	/* now update the DP mapping arrays */
	if (connected) {
		if (tm_subsets_cache->dp2_cache_mapping[display_index] !=
			MAPPING_NOT_SET) {

			if (tm_subsets_cache->all_connected_supported ==
				CQR_SUPPORTED)
				tm_subsets_cache->all_connected_supported =
					CQR_UNKNOWN;

			/* this index is already mapped to some cache
			 * index, so we can skip invalidating the cache too
			 */
			return;
		}

		for (i = 0; i < tm_subsets_cache->num_cur_cached_paths; ++i) {
			if (tm_subsets_cache->cache_2dp_mapping[i] ==
				MAPPING_NOT_SET) {
				tm_subsets_cache->cache_2dp_mapping[i] =
					display_index;
				tm_subsets_cache->dp2_cache_mapping[
					display_index] = i;
				break;
			}

			/* if the current slot is mapped but its path is
			 * disconnected, we can reuse it
			 */
			if (!tm_utils_test_bit(
				&tm_subsets_cache->connected,
				tm_subsets_cache->cache_2dp_mapping[i])) {

				uint32_t previous_index =
					tm_subsets_cache->cache_2dp_mapping[i];

				tm_subsets_cache->cache_2dp_mapping[i] =
					display_index;
				tm_subsets_cache->dp2_cache_mapping[
					display_index] = i;
				tm_subsets_cache->dp2_cache_mapping[
					previous_index] = MAPPING_NOT_SET;
				break;
			}
		}

		/* whatever happened above, we need to reset the
		 * cache; no need to reset the single-index array
		 */
		dal_invalidate_subsets_cache(tm_subsets_cache, false);
	} else {
		if (tm_subsets_cache->all_connected_supported ==
			CQR_NOT_SUPPORTED)
			tm_subsets_cache->all_connected_supported =
				CQR_UNKNOWN;
	}
}
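/*
 * The sizing arithmetic from dal_update_display_mapping(), isolated as a
 * sketch. Worked example: 53 combinations need 106 bits = 13.25 bytes;
 * sizeof(uint32_t) * (1 + 53 / 16) = 4 * (1 + 3) = 16 bytes, i.e. the
 * allocation is rounded up to whole uint32_t words.
 */
static uint32_t cofunc_cache_bytes_sketch(uint32_t max_num_combinations)
{
	/* 2 bits per combination, rounded up to a whole number of words */
	return sizeof(uint32_t) *
		(1 + max_num_combinations / (4 * sizeof(uint32_t)));
}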
/* Example #6 */
int touchmouse_process_events_timeout(touchmouse_device *dev, int milliseconds) {
	unsigned char data[256] = {0};
	int res;
	uint64_t deadline;
	if(milliseconds < 0) {
		deadline = (uint64_t)(-1);
	} else {
		deadline = mono_timer_nanos() + ((uint64_t)milliseconds * 1000000);
	}
	uint64_t nanos = mono_timer_nanos();
	if (nanos == 0 || deadline == 0) {
		TM_FATAL("touchmouse_process_events_timeout: timer function returned an error, erroring out since we have no timer\n");
		return -1;
	}
	do {
		res = hid_read_timeout(dev->dev, data, 255, (deadline - nanos) / 1000000 );
		if (res < 0 ) {
			TM_ERROR("hid_read() failed: %d - %ls\n", res, hid_error(dev->dev));
			return -2;
		} else if (res > 0) {
			// Dump contents of transfer
			TM_SPEW("touchmouse_process_events_timeout: got report: %d bytes:", res);
			int j;
			for(j = 0; j < res; j++) {
				TM_SPEW(" %02X", data[j]);
			}
			TM_SPEW("\n");
			// Interpret contents.
			report* r = (report*)data;
			// We only care about report ID 39 (0x27), which should be 32 bytes long
			if (res == 32 && r->report_id == 0x27) {
				TM_FLOOD("Timestamp: %02X\t%02X bytes:", r->timestamp, r->length - 1);
				int t;
				for(t = 0; t < r->length - 1; t++) {
					TM_FLOOD(" %02X", r->data[t]);
				}
				TM_FLOOD("\n");
				// Reset the decoder if we've seen one timestamp already from earlier
				// transfers, and this one doesn't match.
				if (dev->buf_index != 0 && r->timestamp != dev->timestamp_in_progress) {
					TM_FLOOD("touchmouse_process_events_timeout: timestamps don't match: got %d, expected %d\n", r->timestamp, dev->timestamp_in_progress);
					reset_decoder(dev); // Reset decoder for next transfer
				}
				dev->timestamp_in_progress = r->timestamp;
				for(t = 0; t < r->length - 1; t++) { // We subtract one byte because the length includes the timestamp byte.
					int res;
					// Yes, we process the low nybble first.  Embedded systems are funny like that.
					res = process_nybble(dev, r->data[t] & 0xf);
					if (res == DECODER_COMPLETE) {
						TM_SPEW("Frame completed, triggering callback\n");
						dev->timestamp_last_completed = r->timestamp;
						touchmouse_callback_info cbinfo;
						cbinfo.userdata = dev->userdata;
						cbinfo.image = dev->image;
						cbinfo.timestamp = dev->timestamp_last_completed;
						dev->cb(&cbinfo);
						reset_decoder(dev); // Reset decoder for next transfer
						return 0;
					}
					if (res == DECODER_ERROR) {
						TM_ERROR("Caught error in decoder, aborting decode!\n");
						reset_decoder(dev);
						return -1;
					}
					res = process_nybble(dev, (r->data[t] & 0xf0) >> 4);
					if (res == DECODER_COMPLETE) {
						TM_SPEW("Frame completed, triggering callback\n");
						dev->timestamp_last_completed = r->timestamp;
						touchmouse_callback_info cbinfo;
						cbinfo.userdata = dev->userdata;
						cbinfo.image = dev->image;
						cbinfo.timestamp = dev->timestamp_last_completed;
						dev->cb(&cbinfo);
						reset_decoder(dev); // Reset decoder for next transfer
						return 0;
					}
					if (res == DECODER_ERROR) {
						TM_ERROR("Caught error in decoder, aborting decode!\n");
						reset_decoder(dev);
						return -1;
					}
				}
			}
		}
		nanos = mono_timer_nanos();
	} while (nanos < deadline);
	// Timed out without a completed frame.
	return 0;
}
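/*
 * Usage sketch for the event pump above: poll for up to one second and
 * report the return codes the implementation documents (0 = frame decoded
 * or timeout, negative = read/decode error).
 */
static int pump_once_sketch(touchmouse_device *dev)
{
	int res = touchmouse_process_events_timeout(dev, 1000);

	if (res < 0)
		TM_ERROR("event pump failed: %d\n", res);
	return res;
}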
/* Example #7 */
static int process_nybble(touchmouse_device *state, uint8_t nybble)
{
	TM_FLOOD("process_nybble: buf_index = %d\t%01x\n", state->buf_index, nybble);
	if (nybble >= 16) {
		TM_ERROR("process_nybble: got nybble >= 16, wtf: %d\n", nybble);
		return DECODER_ERROR;
	}
	if (state->next_is_run_encoded) {
		// Previous nybble was 0xF, so this one is (the number of bytes to skip - 3)
		if (state->buf_index + nybble + 3 > 181) {
			// Completing this decode would overrun the buffer.  We've been
			// given invalid data.  Abort.
			TM_ERROR("process_nybble: run encoded would overflow buffer: got 0xF%X (%d zeros) with only %d bytes to fill in buffer\n", nybble, nybble + 3, 181 - state->buf_index);
			return DECODER_ERROR;
		}
		int i;
		for(i = 0 ; i < nybble + 3; i++) {
			state->partial_image[state->buf_index] = 0;
			state->buf_index++;
		}
		state->next_is_run_encoded = 0;
	} else {
		if (nybble == 0xf) {
			state->next_is_run_encoded = 1;
		} else {
			state->partial_image[state->buf_index] = nybble;
			state->buf_index++;
		}
	}
	// If we're done collecting the data, unpack it into image as described above
	// This could probably be reworked to unpack the image in-place reusing the
	// image buffer, but right now I'm being lazy.
	if (state->buf_index == 181) {
		memset(state->image, 0, 195);
		int row;
		int i = 0;
		int startcol;
		int endcol;
		for(row = 0; row < 13 ; row++) {
			switch(row) {
				// Note: inclusive bounds
				case 0: startcol = 0x3;
						endcol = 0xb;
						break;
				case 1: startcol = 0x2;
						endcol = 0xc;
						break;
				case 2:
				case 3: startcol = 0x1;
						endcol = 0xd;
						break;
				default:
						startcol = 0x0;
						endcol = 0xe;
						break;
			}
			int col;
			for(col = startcol ; col <= endcol ; col++) {
				state->image[row * 15 + col] = decoder_table[state->partial_image[i++]];
			}
		}
		return DECODER_COMPLETE;
	}
	return DECODER_IN_PROGRESS;
}
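/*
 * The inverse of the run-length decode above, as an illustrative sketch
 * inferred from the decoder's behavior (not from device firmware): pixel
 * values 0x0..0xE are stored as literal nybbles, while 0xF followed by n
 * encodes a run of (n + 3) zero pixels, i.e. runs of 3..18 zeros.
 */
static size_t encode_nybbles_sketch(const uint8_t *pixels, size_t count,
	uint8_t *out, size_t out_max)
{
	size_t n = 0;
	size_t i = 0;

	while (i < count) {
		size_t run = 0;

		while (i + run < count && pixels[i + run] == 0)
			run++;
		if (run >= 3) {
			/* one 0xF n pair can encode at most 18 zeros */
			size_t chunk = run > 18 ? 18 : run;

			if (n + 2 > out_max)
				break;
			out[n++] = 0xF;
			out[n++] = (uint8_t)(chunk - 3);
			i += chunk;
		} else {
			if (n + 1 > out_max)
				break;
			/* literals must be 0x0..0xE; 0xF is the run prefix */
			out[n++] = pixels[i++] & 0xF;
		}
	}
	return n;
}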