Example #1
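Apparently from the Meanwhile (Lotus Sametime) library's directory service: after validating its arguments, it serializes a search request into a put buffer (request type from map_request(), the directory id, two fields the author marks as unknown, then the query string) and sends it over the service channel.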
int mwDirectory_search(struct mwDirectory *dir, const char *query) {
  struct mwServiceDirectory *srvc;
  struct mwChannel *chan;
  struct mwPutBuffer *b;
  struct mwOpaque o;
  int ret;

  g_return_val_if_fail(dir != NULL, -1);
  g_return_val_if_fail(MW_DIRECTORY_IS_OPEN(dir), -1);
  g_return_val_if_fail(query != NULL, -1);
  g_return_val_if_fail(*query != '\0', -1);

  srvc = dir->service;
  g_return_val_if_fail(srvc != NULL, -1);

  chan = srvc->channel;
  g_return_val_if_fail(chan != NULL, -1);

  b = mwPutBuffer_new();
  guint32_put(b, map_request(dir));
  guint32_put(b, dir->id);
  guint16_put(b, 0x0061);      /* some magic? */
  guint32_put(b, 0x00000008);  /* seek results */
  mwString_put(b, query);

  mwPutBuffer_finalize(&o, b);
  ret = mwChannel_send(chan, action_search, &o);
  mwOpaque_clear(&o);

  return ret;
}
Example #2
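From the Linux kernel's request-based device-mapper (dm-rq): a kthread work handler that recovers the target I/O from its embedded work item and requeues the original request when map_request() reports DM_MAPIO_REQUEUE.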
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}
Example #3
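The companion open call from the same directory service as Example #1: the message layout differs from the search request in that it carries the address-book id and name instead of a query string.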
static int dir_open(struct mwDirectory *dir) {
  struct mwServiceDirectory *srvc;
  struct mwChannel *chan;
  struct mwPutBuffer *b;
  struct mwOpaque o;
  int ret;

  g_return_val_if_fail(dir != NULL, -1);

  srvc = dir->service;
  g_return_val_if_fail(srvc != NULL, -1);

  chan = srvc->channel;
  g_return_val_if_fail(chan != NULL, -1);

  b = mwPutBuffer_new();
  guint32_put(b, map_request(dir));

  /* unsure about these three bytes */
  gboolean_put(b, FALSE);
  guint16_put(b, 0x0000);

  guint32_put(b, dir->book->id);
  mwString_put(b, dir->book->name);

  mwPutBuffer_finalize(&o, b);
  ret = mwChannel_send(chan, action_open, &o);
  mwOpaque_clear(&o);

  return ret;
}
Example #4
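An earlier kernel revision of the same work handler as Example #2; here map_request() and dm_requeue_original_request() still take the request and mapped device explicitly instead of deriving them from the tio.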
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
	struct request *rq = tio->orig;
	struct mapped_device *md = tio->md;

	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(md, rq);
}
Example #5
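A device-mapper blk-mq .queue_rq implementation: it resolves the target, refuses work while the hardware queue is stopped or the target reports busy, starts and initializes the request, and maps it inline, undoing the start bookkeeping on DM_MAPIO_REQUEUE.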
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * On suspend dm_stop_queue() handles stopping the blk-mq
	 * request_queue BUT: even though the hw_queues are marked
	 * BLK_MQ_S_STOPPED at that point there is still a race that
	 * is allowing block/blk-mq.c to call ->queue_rq against a
	 * hctx that it really shouldn't.  The following check guards
	 * against this rarity (albeit _not_ race-free).
	 */
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return BLK_MQ_RQ_QUEUE_BUSY;

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
Example #6
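An XCB-based window manager's event dispatcher: it switches on the event's response type and hands each X11 event, including MapRequest via map_request(), to a dedicated handler; response type 0 denotes an X error, and RandR screen-change events are caught in the default branch.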
void handle_event(xcb_generic_event_t *evt)
{
	uint8_t resp_type = XCB_EVENT_RESPONSE_TYPE(evt);
	switch (resp_type) {
		case XCB_MAP_REQUEST:
			map_request(evt);
			break;
		case XCB_DESTROY_NOTIFY:
			destroy_notify(evt);
			break;
		case XCB_UNMAP_NOTIFY:
			unmap_notify(evt);
			break;
		case XCB_CLIENT_MESSAGE:
			client_message(evt);
			break;
		case XCB_CONFIGURE_REQUEST:
			configure_request(evt);
			break;
		case XCB_PROPERTY_NOTIFY:
			property_notify(evt);
			break;
		case XCB_ENTER_NOTIFY:
			enter_notify(evt);
			break;
		case XCB_MOTION_NOTIFY:
			motion_notify(evt);
			break;
		case XCB_FOCUS_IN:
			focus_in(evt);
			break;
		case 0:
			process_error(evt);
			break;
		default:
			if (randr && resp_type == randr_base + XCB_RANDR_SCREEN_CHANGE_NOTIFY) {
				update_monitors();
			}
			break;
	}
}
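The map_request() handler itself is not part of this listing. As a minimal sketch only, assuming a global xcb_connection_t *dpy of the kind such window managers keep, the simplest possible handler just grants the request:

#include <xcb/xcb.h>

extern xcb_connection_t *dpy;	/* assumed global connection, as in the dispatcher above */

void map_request(xcb_generic_event_t *evt)
{
	/* MapRequest events carry the window that asked to be mapped */
	xcb_map_request_event_t *e = (xcb_map_request_event_t *) evt;

	/* A real WM would register the client and apply its layout first;
	 * here we simply honor the request. */
	xcb_map_window(dpy, e->window);
	xcb_flush(dpy);
}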
Example #7
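A later revision of Example #5's .queue_rq path: the BLK_MQ_S_STOPPED guard is gone, map_request() now derives the request and mapped device from the tio, and a failed mapping additionally kicks the hardware queue after a delay.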
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
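Returning BLK_MQ_RQ_QUEUE_BUSY alone only requeues the request; on an otherwise idle queue nothing would trigger a retry, so the explicit blk_mq_delay_run_hw_queue() call re-runs the hardware queue after 100 ms to guarantee forward progress.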