Example 1
static int _timeshift_skip
  ( timeshift_t *ts, int64_t req_time, int64_t cur_time,
    timeshift_file_t *cur_file, timeshift_file_t **new_file,
    timeshift_index_iframe_t **iframe )
{
  timeshift_index_iframe_t *tsi  = *iframe;
  timeshift_file_t         *tsf  = cur_file;
  int64_t                   sec  = req_time / (1000000 * TIMESHIFT_FILE_PERIOD);
  int                       back = (req_time < cur_time) ? 1 : 0;
  int                       end  = 0;
  
  /* Hold local ref */
  if (cur_file)
    cur_file->refcount++;

  /* Coarse search */
  if (!tsi) {
    while (tsf && !end) {
      if (back) {
        if ((tsf->time <= sec) &&
            (tsi = TAILQ_LAST(&tsf->iframes, timeshift_index_iframe_list)))
          break;
        tsf = timeshift_filemgr_prev(tsf, &end, 1);
      } else {
        if ((tsf->time >= sec) &&
            (tsi = TAILQ_FIRST(&tsf->iframes)))
          break;
        tsf = timeshift_filemgr_next(tsf, &end, 0);
      }
      tsi = NULL;
    }
  }

  /* Fine search */
  if (back) {
    while (!end && tsf && tsi && (tsi->time > req_time)) {
      tsi = TAILQ_PREV(tsi, timeshift_index_iframe_list, link);
      while (!end && tsf && !tsi) {
        if ((tsf = timeshift_filemgr_prev(tsf, &end, 1)))
          tsi = TAILQ_LAST(&tsf->iframes, timeshift_index_iframe_list);
      }
    }
  } else {
    while (!end && tsf && tsi && (tsi->time < req_time)) {
      tsi = TAILQ_NEXT(tsi, link);
      while (!end && tsf && !tsi) {
        if ((tsf = timeshift_filemgr_next(tsf, &end, 0)))
          tsi = TAILQ_FIRST(&tsf->iframes);
      }
    }
  }

  /* End */
  if (!tsf || !tsi)
    end = 1;

  /* Find start/end of buffer */
  if (end) {
    if (back) {
      tsf = timeshift_filemgr_oldest(ts);
      tsi = NULL;
      while (tsf && !tsi) {
        if (!(tsi = TAILQ_FIRST(&tsf->iframes)))
          tsf = timeshift_filemgr_next(tsf, &end, 0);
      }
      end = -1;
    } else {
      tsf = timeshift_filemgr_get(ts, 0);
      tsi = NULL;
      while (tsf && !tsi) {
        if (!(tsi = TAILQ_LAST(&tsf->iframes, timeshift_index_iframe_list)))
          tsf = timeshift_filemgr_prev(tsf, &end, 0);
      }
      end = 1;
    }
  }

  if (cur_file)
    cur_file->refcount--;

  /* Done */
  *new_file = tsf;
  *iframe   = tsi;
  return end;
}
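
Both search phases above lean on the <sys/queue.h> tail-queue macros: TAILQ_FIRST/TAILQ_LAST enter a per-file iframe list from either end, and TAILQ_NEXT/TAILQ_PREV step through it, falling over to a neighboring file when a list runs out. Below is a minimal, self-contained sketch of the backward-walk idiom; the struct and function names are illustrative, not tvheadend's.

#include <stdio.h>
#include <sys/queue.h>

struct item {
    int value;
    TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(item_list, item);

/* Walk a tail queue from tail to head, the same TAILQ_LAST /
 * TAILQ_PREV idiom the backward fine search uses within a file. */
static void walk_backward(struct item_list *list)
{
    struct item *it;

    for (it = TAILQ_LAST(list, item_list);
         it != NULL;
         it = TAILQ_PREV(it, item_list, link))
        printf("%d\n", it->value);
}

int main(void)
{
    struct item_list list = TAILQ_HEAD_INITIALIZER(list);
    struct item a = { .value = 1 }, b = { .value = 2 }, c = { .value = 3 };

    TAILQ_INSERT_TAIL(&list, &a, link);
    TAILQ_INSERT_TAIL(&list, &b, link);
    TAILQ_INSERT_TAIL(&list, &c, link);
    walk_backward(&list);   /* prints 3, 2, 1 */
    return 0;
}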
Example 2
/*
 * Return true if request is done, false otherwise
 *
 * A request is done if we have received the response for the given
 * request. A request vector is done if we have received responses for
 * all of its fragments.
 */
bool
req_done(struct conn *conn, struct msg *msg)
{
    struct msg *cmsg, *pmsg; /* current and previous message */
    uint64_t id;             /* fragment id */
    uint32_t nfragment;      /* # fragment */

    ASSERT(conn->client && !conn->proxy);
    ASSERT(msg->request);

    if (!msg->done) {
        return false;
    }

    id = msg->frag_id;
    if (id == 0) {
        return true;
    }

    if (msg->fdone) {
        /* request has already been marked as done */
        return true;
    }

    if (msg->nfrag_done < msg->nfrag) {
        return false;
    }

    /* check all fragments of the given request vector are done */

    for (pmsg = msg, cmsg = TAILQ_PREV(msg, msg_tqh, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         pmsg = cmsg, cmsg = TAILQ_PREV(cmsg, msg_tqh, c_tqe)) {

        if (!cmsg->done) {
            return false;
        }
    }

    for (pmsg = msg, cmsg = TAILQ_NEXT(msg, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         pmsg = cmsg, cmsg = TAILQ_NEXT(cmsg, c_tqe)) {

        if (!cmsg->done) {
            return false;
        }
    }

    /*
     * At this point, all the fragments including the last fragment have
     * been received.
     *
     * Mark all fragments of the given request vector as done to speed up
     * future req_done calls for any of the fragments of this request.
     */

    msg->fdone = 1;
    nfragment = 0;

    for (pmsg = msg, cmsg = TAILQ_PREV(msg, msg_tqh, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         pmsg = cmsg, cmsg = TAILQ_PREV(cmsg, msg_tqh, c_tqe)) {
        cmsg->fdone = 1;
        nfragment++;
    }

    for (pmsg = msg, cmsg = TAILQ_NEXT(msg, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         pmsg = cmsg, cmsg = TAILQ_NEXT(cmsg, c_tqe)) {
        cmsg->fdone = 1;
        nfragment++;
    }

    ASSERT(msg->frag_owner->nfrag == nfragment);

    msg->post_coalesce(msg->frag_owner);

    log_debug(LOG_DEBUG, "req from c %d with fid %"PRIu64" and %"PRIu32" "
              "fragments is done", conn->sd, id, nfragment);

    return true;
}
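
req_done relies on all fragments of one request sitting contiguously in the connection's message queue: it scans backward with TAILQ_PREV and forward with TAILQ_NEXT until the frag_id changes. A stand-alone sketch of that bidirectional scan, with the msg structure reduced to the two fields the scan needs (everything else from twemproxy is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <sys/queue.h>

struct msg {
    uint64_t frag_id;
    bool done;
    TAILQ_ENTRY(msg) c_tqe;
};
TAILQ_HEAD(msg_tqh, msg);

/* True iff msg and every queued message sharing its frag_id are done.
 * Assumes, as req_done() does, that fragments of one request sit
 * contiguously in the queue. */
static bool frag_run_done(struct msg *msg)
{
    struct msg *cmsg;
    uint64_t id = msg->frag_id;

    if (!msg->done)
        return false;

    for (cmsg = TAILQ_PREV(msg, msg_tqh, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         cmsg = TAILQ_PREV(cmsg, msg_tqh, c_tqe))
        if (!cmsg->done)
            return false;

    for (cmsg = TAILQ_NEXT(msg, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         cmsg = TAILQ_NEXT(cmsg, c_tqe))
        if (!cmsg->done)
            return false;

    return true;
}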
Example 3
/*
 * Return true if request is in error, false otherwise
 *
 * A request is in error if there was an error in receiving the response
 * for the given request. A multiget request is in error if there was an
 * error in receiving the response for any of its fragments.
 */
bool
req_error(struct conn *conn, struct msg *msg)
{
    struct msg *cmsg; /* current message */
    uint64_t id;
    uint32_t nfragment;

    ASSERT(msg->request && req_done(conn, msg));

    if (msg->error) {
        return true;
    }

    id = msg->frag_id;
    if (id == 0) {
        return false;
    }

    if (msg->ferror) {
        /* request has already been marked to be in error */
        return true;
    }

    /* check if any of the fragments of the given request are in error */

    for (cmsg = TAILQ_PREV(msg, msg_tqh, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         cmsg = TAILQ_PREV(cmsg, msg_tqh, c_tqe)) {

        if (cmsg->error) {
            goto ferror;
        }
    }

    for (cmsg = TAILQ_NEXT(msg, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         cmsg = TAILQ_NEXT(cmsg, c_tqe)) {

        if (cmsg->error) {
            goto ferror;
        }
    }

    return false;

ferror:

    /*
     * Mark all fragments of the given request as being in error to speed up
     * future req_error calls for any of the fragments of this request.
     */

    msg->ferror = 1;
    nfragment = 1;

    for (cmsg = TAILQ_PREV(msg, msg_tqh, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         cmsg = TAILQ_PREV(cmsg, msg_tqh, c_tqe)) {
        cmsg->ferror = 1;
        nfragment++;
    }

    for (cmsg = TAILQ_NEXT(msg, c_tqe);
         cmsg != NULL && cmsg->frag_id == id;
         cmsg = TAILQ_NEXT(cmsg, c_tqe)) {
        cmsg->ferror = 1;
        nfragment++;
    }

    log_debug(LOG_DEBUG, "req from c %d with fid %"PRIu64" and %"PRIu32" "
              "fragments is in error", conn->sd, id, nfragment);

    return true;
}
Example 4
void
process_scope(scope_t *scope)
{
	/*
	 * We are "leaving" this scope.  We should now have
	 * enough information to process the lists of scopes
	 * we encapsulate.
	 */
	scope_t *cur_scope;
	u_int skip_patch_count;
	u_int skip_instr_count;

	cur_scope = TAILQ_LAST(&scope->inner_scope, scope_tailq);
	skip_patch_count = 0;
	skip_instr_count = 0;
	while (cur_scope != NULL) {
		u_int patch0_patch_skip;

		patch0_patch_skip = 0;
		switch (cur_scope->type) {
		case SCOPE_IF:
		case SCOPE_ELSE_IF:
			if (skip_instr_count != 0) {
				/* Create a tail patch */
				patch0_patch_skip++;
				cur_scope->patches[1].skip_patch =
				    skip_patch_count + 1;
				cur_scope->patches[1].skip_instr =
				    skip_instr_count;
			}

			/* Count Head patch */
			patch0_patch_skip++;

			/* Count any patches contained in our inner scope */
			patch0_patch_skip += cur_scope->inner_scope_patches;

			cur_scope->patches[0].skip_patch = patch0_patch_skip;
			cur_scope->patches[0].skip_instr =
			    cur_scope->end_addr - cur_scope->begin_addr;

			skip_instr_count += cur_scope->patches[0].skip_instr;

			skip_patch_count += patch0_patch_skip;
			if (cur_scope->type == SCOPE_IF) {
				scope->inner_scope_patches += skip_patch_count;
				skip_patch_count = 0;
				skip_instr_count = 0;
			}
			break;
		case SCOPE_ELSE:
			/* Count any patches contained in our inner scope */
			skip_patch_count += cur_scope->inner_scope_patches;

			skip_instr_count += cur_scope->end_addr
					  - cur_scope->begin_addr;
			break;
		case SCOPE_ROOT:
			stop("Unexpected scope type encountered", EX_SOFTWARE);
			/* NOTREACHED */
		}

		cur_scope = TAILQ_PREV(cur_scope, scope_tailq, scope_links);
	}
}
Example 5
enum cmd_retval
cmd_swap_pane_exec(struct cmd *self, struct cmd_q *cmdq)
{
	struct args		*args = self->args;
	struct winlink		*src_wl, *dst_wl;
	struct window		*src_w, *dst_w;
	struct window_pane	*tmp_wp, *src_wp, *dst_wp;
	struct layout_cell	*src_lc, *dst_lc;
	u_int			 sx, sy, xoff, yoff;

	dst_wl = cmd_find_pane(cmdq, args_get(args, 't'), NULL, &dst_wp);
	if (dst_wl == NULL)
		return (CMD_RETURN_ERROR);
	dst_w = dst_wl->window;
	server_unzoom_window(dst_w);

	if (!args_has(args, 's')) {
		src_w = dst_w;
		if (args_has(self->args, 'D')) {
			src_wp = TAILQ_NEXT(dst_wp, entry);
			if (src_wp == NULL)
				src_wp = TAILQ_FIRST(&dst_w->panes);
		} else if (args_has(self->args, 'U')) {
			src_wp = TAILQ_PREV(dst_wp, window_panes, entry);
			if (src_wp == NULL)
				src_wp = TAILQ_LAST(&dst_w->panes, window_panes);
		} else {
			src_wl = cmd_find_pane_marked(cmdq, NULL, NULL,
			    &src_wp);
			if (src_wl == NULL)
				return (CMD_RETURN_ERROR);
			src_w = src_wl->window;
		}
	} else {
		src_wl = cmd_find_pane_marked(cmdq, args_get(args, 's'), NULL,
		    &src_wp);
		if (src_wl == NULL)
			return (CMD_RETURN_ERROR);
		src_w = src_wl->window;
	}
	server_unzoom_window(src_w);

	if (src_wp == dst_wp)
		return (CMD_RETURN_NORMAL);

	tmp_wp = TAILQ_PREV(dst_wp, window_panes, entry);
	TAILQ_REMOVE(&dst_w->panes, dst_wp, entry);
	TAILQ_REPLACE(&src_w->panes, src_wp, dst_wp, entry);
	if (tmp_wp == src_wp)
		tmp_wp = dst_wp;
	if (tmp_wp == NULL)
		TAILQ_INSERT_HEAD(&dst_w->panes, src_wp, entry);
	else
		TAILQ_INSERT_AFTER(&dst_w->panes, tmp_wp, src_wp, entry);

	src_lc = src_wp->layout_cell;
	dst_lc = dst_wp->layout_cell;
	src_lc->wp = dst_wp;
	dst_wp->layout_cell = src_lc;
	dst_lc->wp = src_wp;
	src_wp->layout_cell = dst_lc;

	src_wp->window = dst_w;
	dst_wp->window = src_w;

	sx = src_wp->sx; sy = src_wp->sy;
	xoff = src_wp->xoff; yoff = src_wp->yoff;
	src_wp->xoff = dst_wp->xoff; src_wp->yoff = dst_wp->yoff;
	window_pane_resize(src_wp, dst_wp->sx, dst_wp->sy);
	dst_wp->xoff = xoff; dst_wp->yoff = yoff;
	window_pane_resize(dst_wp, sx, sy);

	if (!args_has(self->args, 'd')) {
		if (src_w != dst_w) {
			window_set_active_pane(src_w, dst_wp);
			window_set_active_pane(dst_w, src_wp);
		} else {
			tmp_wp = dst_wp;
			if (!window_pane_visible(tmp_wp))
				tmp_wp = src_wp;
			window_set_active_pane(src_w, tmp_wp);
		}
	} else {
		if (src_w->active == src_wp)
			window_set_active_pane(src_w, dst_wp);
		if (dst_w->active == dst_wp)
			window_set_active_pane(dst_w, src_wp);
	}
	if (src_w != dst_w) {
		if (src_w->last == src_wp)
			src_w->last = NULL;
		if (dst_w->last == dst_wp)
			dst_w->last = NULL;
	}
	server_redraw_window(src_w);
	server_redraw_window(dst_w);

	return (CMD_RETURN_NORMAL);
}
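
The delicate part above is that src_wp and dst_wp may live in the same panes list, so the code saves dst_wp's predecessor before unlinking and patches things up afterwards; note that TAILQ_REPLACE is not present in every sys/queue.h (tmux carries a compat definition). Below is a hedged sketch of swapping two entries within a single tail queue using only the portable macros; the struct names are illustrative.

#include <stddef.h>
#include <sys/queue.h>

struct pane {
    TAILQ_ENTRY(pane) entry;
};
TAILQ_HEAD(pane_list, pane);

/* Swap distinct entries a and b within one list. */
static void swap_entries(struct pane_list *list, struct pane *a,
    struct pane *b)
{
    struct pane *before_a = TAILQ_PREV(a, pane_list, entry);
    struct pane *before_b = TAILQ_PREV(b, pane_list, entry);

    if (before_a == b || before_b == a) {
        /* Adjacent: move the later entry in front of the earlier one. */
        struct pane *first = (before_a == b) ? b : a;
        struct pane *second = (first == a) ? b : a;

        TAILQ_REMOVE(list, second, entry);
        TAILQ_INSERT_BEFORE(first, second, entry);
        return;
    }

    /* Non-adjacent: unlink both, then reinsert each at the
     * other's old position. */
    TAILQ_REMOVE(list, a, entry);
    TAILQ_REMOVE(list, b, entry);
    if (before_a == NULL)
        TAILQ_INSERT_HEAD(list, b, entry);
    else
        TAILQ_INSERT_AFTER(list, before_a, b, entry);
    if (before_b == NULL)
        TAILQ_INSERT_HEAD(list, a, entry);
    else
        TAILQ_INSERT_AFTER(list, before_b, a, entry);
}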
Example 6
/*
 * reclaim leaf nodes using the LRU replacement algorithm.
 * we don't select a node as a possible victim if the node is
 *  - already reclaimed
 *  - its counter value is larger than the threshold
 *  - both its parent's count and its sibling's subtree sum are larger
 *    than the threshold
 * then, we compare the parent's count with the sibling's subtree sum:
 * - if the parent is larger, reduce the subtree into the parent.
 * - otherwise, reclaim this node and its parent, and leave the
 *   sibling's subtree.
 */
static void
_reclaim(aguri_t *tp, int n) {
	anode_t *np, *after, *parent, *sibling;
	u_int64_t thresh, sum, moved_to_head;

	thresh = tp->tr_count / 64;
	if (thresh == 0)
		thresh = 1;
	while (tp->tr_nfree < n) {
		/*
		 * select a victim from the LRU list.
		 * exclude nodes whose count is more than the threshold.
		 */
		moved_to_head = 0;
		np = TAILQ_LAST(&tp->tr_lru, _lru);
		while (np != NULL) {
			if (np->tn_intree == 0) {
				/* free node */
				np = TAILQ_PREV(np, _lru, tn_chain);
				continue;
			} else if (np->tn_count > thresh) {
				/* if bigger than thresh, move it to head */
				after = np;
				np = TAILQ_PREV(np, _lru, tn_chain);
				if (moved_to_head > 3)
					continue;
				TAILQ_REMOVE(&tp->tr_lru, after, tn_chain);
				TAILQ_INSERT_HEAD(&tp->tr_lru, after, tn_chain);
				moved_to_head++;
				continue;
			}
			/*
			 * a possible victim found.
			 * see if either its parent or sibling's subtree is
			 * smaller than the threshold.
			 * also check if parent is not the top node.
			 */
			parent = np->tn_parent;
			if (parent->tn_left == np)
				sibling = parent->tn_right;
			else
				sibling = parent->tn_left;
			sum = _subsum(sibling);
			if (sum > thresh && (parent->tn_count > thresh
			    || parent == tp->tr_top)) {
				after = np;
				np = TAILQ_PREV(np, _lru, tn_chain);
				if (moved_to_head > 3)
					continue;
				TAILQ_REMOVE(&tp->tr_lru, after, tn_chain);
				TAILQ_INSERT_HEAD(&tp->tr_lru, after, tn_chain);
				moved_to_head++;
				continue;
			}

			/*
			 * at this point, we are about to reclaim this victim.
			 * compare parent's count with sibling's subtree sum.
			 * if parent is larger, reduce the subtree into parent.
			 * otherwise, reclaim this node and parent, and leave
			 * sibling.
			 */
			if (parent->tn_count > sum || parent == tp->tr_top)
				_reduce(tp, np->tn_parent, 0);
			else
				_free(tp, np);
			break;
		}

		if (np == NULL) {
			thresh *= 2;
#if 0
			fprintf(stderr, "thresh increased to %llu\n", thresh);
#endif
		}
	}
}
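
Each "move it to head" branch above is the classic LRU touch: remember the node, step the cursor back with TAILQ_PREV, then splice the remembered node to the front. A tiny sketch of that touch operation, with hypothetical names:

#include <sys/queue.h>

struct anode {
    TAILQ_ENTRY(anode) tn_chain;
};
TAILQ_HEAD(lru_list, anode);

/* Move np to the head of the LRU (most recently used).  A caller
 * iterating the list must advance its cursor with TAILQ_PREV before
 * calling this, exactly as _reclaim() does with its 'after' copy. */
static void lru_touch(struct lru_list *lru, struct anode *np)
{
    TAILQ_REMOVE(lru, np, tn_chain);
    TAILQ_INSERT_HEAD(lru, np, tn_chain);
}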
Example 7
enum cmd_retval
join_pane(struct cmd *self, struct cmd_q *cmdq, int not_same_window)
{
	struct args		*args = self->args;
	struct session		*dst_s;
	struct winlink		*src_wl, *dst_wl;
	struct window		*src_w, *dst_w;
	struct window_pane	*src_wp, *dst_wp;
	char			*cause;
	int			 size, percentage, dst_idx;
	enum layout_type	 type;
	struct layout_cell	*lc;

	dst_wl = cmd_find_pane(cmdq, args_get(args, 't'), &dst_s, &dst_wp);
	if (dst_wl == NULL)
		return (CMD_RETURN_ERROR);
	dst_w = dst_wl->window;
	dst_idx = dst_wl->idx;
	server_unzoom_window(dst_w);

	src_wl = cmd_find_pane(cmdq, args_get(args, 's'), NULL, &src_wp);
	if (src_wl == NULL)
		return (CMD_RETURN_ERROR);
	src_w = src_wl->window;
	server_unzoom_window(src_w);

	if (not_same_window && src_w == dst_w) {
		cmdq_error(cmdq, "can't join a pane to its own window");
		return (CMD_RETURN_ERROR);
	}
	if (!not_same_window && src_wp == dst_wp) {
		cmdq_error(cmdq, "source and target panes must be different");
		return (CMD_RETURN_ERROR);
	}

	type = LAYOUT_TOPBOTTOM;
	if (args_has(args, 'h'))
		type = LAYOUT_LEFTRIGHT;

	size = -1;
	if (args_has(args, 'l')) {
		size = args_strtonum(args, 'l', 0, INT_MAX, &cause);
		if (cause != NULL) {
			cmdq_error(cmdq, "size %s", cause);
			free(cause);
			return (CMD_RETURN_ERROR);
		}
	} else if (args_has(args, 'p')) {
		percentage = args_strtonum(args, 'p', 0, 100, &cause);
		if (cause != NULL) {
			cmdq_error(cmdq, "percentage %s", cause);
			free(cause);
			return (CMD_RETURN_ERROR);
		}
		if (type == LAYOUT_TOPBOTTOM)
			size = (dst_wp->sy * percentage) / 100;
		else
			size = (dst_wp->sx * percentage) / 100;
	}
	lc = layout_split_pane(dst_wp, type, size, args_has(args, 'b'));
	if (lc == NULL) {
		cmdq_error(cmdq, "create pane failed: pane too small");
		return (CMD_RETURN_ERROR);
	}

	layout_close_pane(src_wp);

	if (src_w->active == src_wp) {
		src_w->active = TAILQ_PREV(src_wp, window_panes, entry);
		if (src_w->active == NULL)
			src_w->active = TAILQ_NEXT(src_wp, entry);
	}
	TAILQ_REMOVE(&src_w->panes, src_wp, entry);

	if (window_count_panes(src_w) == 0)
		server_kill_window(src_w);
	else
		notify_window_layout_changed(src_w);

	src_wp->window = dst_w;
	TAILQ_INSERT_AFTER(&dst_w->panes, dst_wp, src_wp, entry);
	layout_assign_pane(lc, src_wp);

	recalculate_sizes();

	server_redraw_window(src_w);
	server_redraw_window(dst_w);

	if (!args_has(args, 'd')) {
		window_set_active_pane(dst_w, src_wp);
		session_select(dst_s, dst_idx);
		server_redraw_session(dst_s);
	} else
		server_status_session(dst_s);

	notify_window_layout_changed(dst_w);
	return (CMD_RETURN_NORMAL);
}
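
The active-pane repair above (prefer TAILQ_PREV, fall back to TAILQ_NEXT) is a reusable pattern for choosing a neighbor before unlinking a list element. A small sketch under the same assumptions, with illustrative names:

#include <stddef.h>
#include <sys/queue.h>

struct window_pane {
    TAILQ_ENTRY(window_pane) entry;
};
TAILQ_HEAD(window_panes, window_pane);

/* Pick a replacement for wp before it is unlinked: its predecessor if
 * one exists, otherwise its successor, or NULL if wp is alone. */
static struct window_pane *
neighbor_pane(struct window_pane *wp)
{
    struct window_pane *next;

    next = TAILQ_PREV(wp, window_panes, entry);
    if (next == NULL)
        next = TAILQ_NEXT(wp, entry);
    return (next);
}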
Example 8
/*
 * create a leaf node and its branch point.
 * then, insert the branch point as the parent of the specified node.
 */
static anode_t *
_alloc(aguri_t *tp, const void *key, anode_t *np) {
	anode_t *bp = NULL, *leaf = NULL;

	/* reclaim two nodes from the LRU list */
	leaf = TAILQ_LAST(&tp->tr_lru, _lru);
	while (leaf->tn_intree)
		leaf = TAILQ_PREV(leaf, _lru, tn_chain);
	TAILQ_REMOVE(&tp->tr_lru, leaf, tn_chain);
	TAILQ_INSERT_HEAD(&tp->tr_lru, leaf, tn_chain);
	leaf->tn_intree = 1;
	tp->tr_nfree--;
	_nreset(leaf);
	memcpy(leaf->tn_key, key, tp->tr_keylen/8);
	leaf->tn_prefixlen = tp->tr_keylen;

	assert(tp->tr_nfree > 0);
	bp = TAILQ_LAST(&tp->tr_lru, _lru);
	while (bp->tn_intree)
		bp = TAILQ_PREV(bp, _lru, tn_chain);
	TAILQ_REMOVE(&tp->tr_lru, bp, tn_chain);
	bp->tn_intree = 1;
	tp->tr_nfree--;
	_nreset(bp);
	bp->tn_prefixlen = _common(np->tn_key, key, tp->tr_keylen, bp->tn_key);

	if (bp->tn_prefixlen >= np->tn_prefixlen) {
		/*
		 * leaf should be a child of np
		 */
		assert(np->tn_left == NULL && np->tn_right == NULL);
		TAILQ_REMOVE(&tp->tr_lru, np, tn_chain);
		TAILQ_INSERT_HEAD(&tp->tr_lru, bp, tn_chain);
		if (bp->tn_prefixlen != np->tn_prefixlen) {
			_set(bp->tn_key, np->tn_prefixlen + 1);
			np->tn_left = leaf;
			np->tn_right = bp;
		} else {
			np->tn_left = bp;
			np->tn_right = leaf;
		}
		bp->tn_parent = np;
		leaf->tn_parent = np;
		return (leaf);
	}

	if (np->tn_parent->tn_left == np)
		np->tn_parent->tn_left = bp;
	else
		np->tn_parent->tn_right = bp;
	bp->tn_parent = np->tn_parent;
	if (_isset(key, bp->tn_prefixlen + 1)) {
		bp->tn_left = np;
		bp->tn_right = leaf;
	} else {
		bp->tn_left = leaf;
		bp->tn_right = np;
	}
	np->tn_parent = bp;
	leaf->tn_parent = bp;

	return (leaf);
}
Example 9
int
cmd_join_pane_exec(struct cmd *self, struct cmd_ctx *ctx)
{
	struct args		*args = self->args;
	struct session		*dst_s;
	struct winlink		*src_wl, *dst_wl;
	struct window		*src_w, *dst_w;
	struct window_pane	*src_wp, *dst_wp;
	char			*cause;
	int			 size, percentage, dst_idx;
	enum layout_type	 type;
	struct layout_cell	*lc;

	dst_wl = cmd_find_pane(ctx, args_get(args, 't'), &dst_s, &dst_wp);
	if (dst_wl == NULL)
		return (-1);
	dst_w = dst_wl->window;
	dst_idx = dst_wl->idx;

	src_wl = cmd_find_pane(ctx, args_get(args, 's'), NULL, &src_wp);
	if (src_wl == NULL)
		return (-1);
	src_w = src_wl->window;

	if (src_w == dst_w) {
		ctx->error(ctx, "can't join a pane to its own window");
		return (-1);
	}

	type = LAYOUT_TOPBOTTOM;
	if (args_has(args, 'h'))
		type = LAYOUT_LEFTRIGHT;

	size = -1;
	if (args_has(args, 'l')) {
		size = args_strtonum(args, 'l', 0, INT_MAX, &cause);
		if (cause != NULL) {
			ctx->error(ctx, "size %s", cause);
			xfree(cause);
			return (-1);
		}
	} else if (args_has(args, 'p')) {
		percentage = args_strtonum(args, 'p', 0, 100, &cause);
		if (cause != NULL) {
			ctx->error(ctx, "percentage %s", cause);
			xfree(cause);
			return (-1);
		}
		if (type == LAYOUT_TOPBOTTOM)
			size = (dst_wp->sy * percentage) / 100;
		else
			size = (dst_wp->sx * percentage) / 100;
	}

	if ((lc = layout_split_pane(dst_wp, type, size)) == NULL) {
		ctx->error(ctx, "create pane failed: pane too small");
		return (-1);
	}

	layout_close_pane(src_wp);

	if (src_w->active == src_wp) {
		src_w->active = TAILQ_PREV(src_wp, window_panes, entry);
		if (src_w->active == NULL)
			src_w->active = TAILQ_NEXT(src_wp, entry);
	}
	TAILQ_REMOVE(&src_w->panes, src_wp, entry);

	if (window_count_panes(src_w) == 0)
		server_kill_window(src_w);

	src_wp->window = dst_w;
	TAILQ_INSERT_AFTER(&dst_w->panes, dst_wp, src_wp, entry);
	layout_assign_pane(lc, src_wp);

	recalculate_sizes();

	server_redraw_window(src_w);
	server_redraw_window(dst_w);

	if (!args_has(args, 'd')) {
		window_set_active_pane(dst_w, src_wp);
		session_select(dst_s, dst_idx);
		server_redraw_session(dst_s);
	} else
		server_status_session(dst_s);

	return (0);
}
Example 10
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock, *freecnt;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (vp->v_iflag & VI_DOOMED)
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= sizeof(bp->b_pages),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  The invalid part can only
	 * exist at the end of the file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (m[count - 1]->valid != 0 && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	/*
	 * Synchronous and asynchronous paging operations use different
	 * free pbuf counters.  This is done to prevent asynchronous requests
	 * from consuming all pbufs.
	 * Allocate the pbuf at the very beginning of the function, so that
	 * if we are low on a certain kind of pbuf we don't even proceed to
	 * BMAP, but sleep instead.
	 */
	freecnt = iodone != NULL ?
	    &vnode_async_pbuf_freecnt : &vnode_pbuf_freecnt;
	bp = getpbuf(freecnt);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use the old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		relpbuf(bp, freecnt);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			PCPU_INC(cnt.v_vnodein);
			PCPU_INC(cnt.v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		relpbuf(bp, freecnt);
		return (VM_PAGER_ERROR);
	}

	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		relpbuf(bp, freecnt);
		for (i = 0; i < count; i++) {
			PCPU_INC(cnt.v_vnodein);
			PCPU_INC(cnt.v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single-page request,
	 * which may not be preceded by a call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		relpbuf(bp, freecnt);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		VM_OBJECT_WLOCK(object);
		m[0]->valid = VM_PAGE_BITS_ALL;
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */   
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rbehind = min(rbehind, m[0]->pindex);
	rahead = min(rahead, after);
	rahead = min(rahead, object->size - m[count - 1]->pindex);
	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
	if (rbehind + rahead + count > nitems(bp->b_pages)) {
		int trim, sum;

		trim = rbehind + rahead + count - nitems(bp->b_pages) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
	    ("%s: behind %d ahead %d count %d", __func__,
	    rbehind, rahead, count));

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to the first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	i = bp->b_npages = 0;
	if (rbehind) {
		vm_pindex_t startpindex, tpindex;
		vm_page_t p;

		VM_OBJECT_WLOCK(object);
		startpindex = m[0]->pindex - rbehind;
		if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
		    p->pindex >= startpindex)
			startpindex = p->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (tpindex = m[0]->pindex - 1;
		    tpindex >= startpindex && tpindex < m[0]->pindex;
		    tpindex--, i++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL) {
				/* Shift the array. */
				for (int j = 0; j < i; j++)
					bp->b_pages[j] = bp->b_pages[j + 
					    tpindex + 1 - startpindex]; 
				break;
			}
			bp->b_pages[tpindex - startpindex] = p;
		}

		bp->b_pgbefore = i;
		bp->b_npages += i;
		bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
	} else
		bp->b_pgbefore = 0;

	/* Requested pages. */
	for (int j = 0; j < count; j++, i++)
		bp->b_pages[i] = m[j];
	bp->b_npages += count;

	if (rahead) {
		vm_pindex_t endpindex, tpindex;
		vm_page_t p;

		if (!VM_OBJECT_WOWNED(object))
			VM_OBJECT_WLOCK(object);
		endpindex = m[count - 1]->pindex + rahead + 1;
		if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
		    p->pindex < endpindex)
			endpindex = p->pindex;
		if (endpindex > object->size)
			endpindex = object->size;

		for (tpindex = m[count - 1]->pindex + 1;
		    tpindex < endpindex; i++, tpindex++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			bp->b_pages[i] = p;
		}

		bp->b_pgafter = i - bp->b_npages;
		bp->b_npages = i;
	} else
		bp->b_pgafter = 0;

	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= nitems(bp->b_pages),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 1; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		     __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regard to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = bp->b_npages << PAGE_SHIFT;
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;

	/*
	 * And map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_offset = 0;
	} else {
		bp->b_data = bp->b_kvabase;
		pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	}

	/* Build a minimal buffer header. */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
	bp->b_iooffset = dbtob(bp->b_blkno);
	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
	    IDX_TO_OFF(m[0]->pindex) % bsize,
	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
	    "blkno0 %ju b_blkno %ju", bsize,
	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, bp->b_npages);

	if (iodone != NULL) { /* async */
		bp->b_pgiodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		return (VM_PAGER_OK);
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		relpbuf(bp, &vnode_pbuf_freecnt);
		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
	}
}
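
In the read-behind setup above, TAILQ_PREV(m[0], pglist, listq) finds the nearest already-resident page so that allocation never extends past it, and TAILQ_NEXT plays the same role on the read-ahead side. A simplified sketch of that clamping step over a pindex-sorted queue; the types are reduced to the bare minimum and are not the kernel's:

#include <stdint.h>
#include <sys/queue.h>

struct page {
    uint64_t pindex;            /* position in the object, ascending */
    TAILQ_ENTRY(page) listq;
};
TAILQ_HEAD(pglist, page);

/* How many pages may be read behind p, given a requested rbehind?
 * Mirrors the startpindex clamping against TAILQ_PREV(m[0]) above. */
static uint64_t
clamp_rbehind(struct page *p, uint64_t rbehind)
{
    struct page *prev = TAILQ_PREV(p, pglist, listq);
    uint64_t start = (p->pindex >= rbehind) ? p->pindex - rbehind : 0;

    if (prev != NULL && prev->pindex + 1 > start)
        start = prev->pindex + 1;
    return (p->pindex - start);
}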
Example 11
    /* here we should check if we reached the end of the DNS server list */
    hash_remove_request(pData, (struct request *)arg);
    free((struct request *)arg);
    ++removed_queries;
}
#else /* VBOX */
static void
timeout(PNATState pData, struct socket *so, void *arg)
{
    struct request *req = (struct request *)arg;
    struct dns_entry *de;
    /* be paranoid */
    AssertPtrReturnVoid(arg);

    if (   req->dnsgen != pData->dnsgen
        || req->dns_server == NULL
        || (de = TAILQ_PREV(req->dns_server, dns_list_head, de_list)) == NULL)
    {
        if (req->dnsgen != pData->dnsgen)
        {
            /* XXX: Log2 */
            LogRel(("NAT: dnsproxy: timeout: req %p dnsgen %u != %u on %R[natsock]\n",
                    req, req->dnsgen, pData->dnsgen, so));
        }
        hash_remove_request(pData, req);
        RTMemFree(req);
        ++removed_queries;
        /* the rest of the cleanup happens at the end of the function. */
    }
    else
    {
        struct ip *ip;
        struct udphdr *udp;
        int iphlen;
        struct mbuf *m = NULL;
        char *data;

        m = slirpDnsMbufAlloc(pData);
        if (m == NULL)
        {
            LogRel(("NAT: Can't allocate mbuf\n"));
            goto socket_clean_up;
        }

        /* mbuf initialization */
        m->m_data += if_maxlinkhdr;

        ip = mtod(m, struct ip *);
        udp = (struct udphdr *)&ip[1]; /* ip attributes */
        data = (char *)&udp[1];
        iphlen = sizeof(struct ip);

        m->m_len += sizeof(struct ip);
        m->m_len += sizeof(struct udphdr);
        m->m_len += req->nbyte;

        ip->ip_src.s_addr = so->so_laddr.s_addr;
        ip->ip_dst.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_DNS);
        udp->uh_dport = htons(53);
        udp->uh_sport = so->so_lport;

        memcpy(data, req->byte, req->nbyte); /* copying the initial request */

        /* req points to so->so_timeout_arg */
        req->dns_server = de;

        /* expiration will be bumped in dnsproxy_query */

        dnsproxy_query(pData, so, m, iphlen);
        /* should we free so->so_m ? */
        return;
    }

 socket_clean_up:
    /* This socket (so) will be detached, so we need to remove the timeout
     * (and its arg) references before leaving.
     */
    so->so_timeout = NULL;
    so->so_timeout_arg = NULL;
    return;

}
Example 12
File: move.c Project: Fresne/i3
/*
 * Moves the given container in the given direction (D_LEFT, D_RIGHT,
 * D_UP, D_DOWN).
 *
 */
void tree_move(Con *con, int direction) {
    position_t position;
    Con *target;

    DLOG("Moving in direction %d\n", direction);

    /* 1: get the first parent with the same orientation */

    if (con->type == CT_WORKSPACE) {
        DLOG("Not moving workspace\n");
        return;
    }

    if (con->parent->type == CT_WORKSPACE && con_num_children(con->parent) == 1) {
        /* This is the only con on this workspace */
        move_to_output_directed(con, direction);
        return;
    }

    orientation_t o = (direction == D_LEFT || direction == D_RIGHT ? HORIZ : VERT);

    Con *same_orientation = con_parent_with_orientation(con, o);
    /* The do {} while is used to 'restart' at this point with a different
     * same_orientation; see the very last lines before the end of this
     * block. */
    do {
        /* There is no parent container with the same orientation */
        if (!same_orientation) {
            if (con_is_floating(con)) {
                /* this is a floating con, we just disable floating */
                floating_disable(con, true);
                return;
            }
            if (con_inside_floating(con)) {
                /* 'con' should be moved out of a floating container */
                DLOG("Inside floating, moving to workspace\n");
                attach_to_workspace(con, con_get_workspace(con), direction);
                goto end;
            }
            DLOG("Force-changing orientation\n");
            ws_force_orientation(con_get_workspace(con), o);
            same_orientation = con_parent_with_orientation(con, o);
        }

        /* easy case: the move is within this container */
        if (same_orientation == con->parent) {
            DLOG("We are in the same container\n");
            Con *swap;
            if ((swap = (direction == D_LEFT || direction == D_UP ? TAILQ_PREV(con, nodes_head, nodes) : TAILQ_NEXT(con, nodes)))) {
                if (!con_is_leaf(swap)) {
                    DLOG("Moving into our bordering branch\n");
                    target = con_descend_direction(swap, direction);
                    position = (con_orientation(target->parent) != o ||
                                        direction == D_UP ||
                                        direction == D_LEFT
                                    ? AFTER
                                    : BEFORE);
                    insert_con_into(con, target, position);
                    goto end;
                }
                if (direction == D_LEFT || direction == D_UP)
                    TAILQ_SWAP(swap, con, &(swap->parent->nodes_head), nodes);
                else
                    TAILQ_SWAP(con, swap, &(swap->parent->nodes_head), nodes);

                TAILQ_REMOVE(&(con->parent->focus_head), con, focused);
                TAILQ_INSERT_HEAD(&(swap->parent->focus_head), con, focused);

                DLOG("Swapped.\n");
                ipc_send_window_event("move", con);
                return;
            }

            if (con->parent == con_get_workspace(con)) {
                /*  If we couldn't find a place to move it on this workspace,
                 *  try to move it to a workspace on a different output */
                move_to_output_directed(con, direction);
                ipc_send_window_event("move", con);
                return;
            }

            /* If there was no con with which we could swap the current one,
             * search again, but starting one level higher. */
            same_orientation = con_parent_with_orientation(con->parent, o);
        }
    } while (same_orientation == NULL);

    /* this time, we have to move to another container */
    /* This is the container *above* 'con' (an ancestor of con) which is inside
     * 'same_orientation' */
    Con *above = con;
    while (above->parent != same_orientation)
        above = above->parent;

    /* Enforce the fullscreen focus restrictions. */
    if (!con_fullscreen_permits_focusing(above->parent)) {
        LOG("Cannot move out of fullscreen container\n");
        return;
    }

    DLOG("above = %p\n", above);

    Con *next = (direction == D_UP || direction == D_LEFT ? TAILQ_PREV(above, nodes_head, nodes) : TAILQ_NEXT(above, nodes));

    if (next && !con_is_leaf(next)) {
        DLOG("Moving into the bordering branch of our adjacent container\n");
        target = con_descend_direction(next, direction);
        position = (con_orientation(target->parent) != o ||
                            direction == D_UP ||
                            direction == D_LEFT
                        ? AFTER
                        : BEFORE);
        insert_con_into(con, target, position);
    } else {
        DLOG("Moving into container above\n");
        position = (direction == D_UP || direction == D_LEFT ? BEFORE : AFTER);
        insert_con_into(con, above, position);
    }

end:
    /* We need to call con_focus() to fix the focus stack "above" the container
     * we just inserted the focused container into (otherwise, the parent
     * container(s) would still point to the old container(s)). */
    con_focus(con);

    /* force re-painting the indicators */
    FREE(con->deco_render_params);

    tree_flatten(croot);
    ipc_send_window_event("move", con);
}
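
One caveat when reading this example: TAILQ_SWAP here is not the FreeBSD macro of that name (which exchanges the contents of two whole queues); i3 bundles its own queue.h in which TAILQ_SWAP exchanges two elements, and the call sites above only ever swap neighbors. A hedged equivalent for adjacent nodes using just the standard macros:

#include <sys/queue.h>

struct con_node {
    TAILQ_ENTRY(con_node) nodes;
};
TAILQ_HEAD(nodes_head, con_node);

/* Swap two adjacent entries, where 'first' immediately precedes
 * 'second', by unlinking 'first' and relinking it after 'second'. */
static void swap_adjacent(struct nodes_head *head, struct con_node *first,
                          struct con_node *second)
{
    TAILQ_REMOVE(head, first, nodes);
    TAILQ_INSERT_AFTER(head, second, first, nodes);
}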