/*
 * split_overlap() divided an existing blame e into up to three parts
 * in split[].  Distribute those parts between the blamed (parent) and
 * unblamed queues, reusing e's storage for one of them.
 */
static void split_blame(struct blame_entry ***blamed,
			struct blame_entry ***unblamed,
			struct blame_entry *split,
			struct blame_entry *e)
{
	int has_head = !!split[0].suspect;
	int has_tail = !!split[2].suspect;

	if (has_head && has_tail) {
		/* Leading part keeps e's storage ... */
		dup_entry(unblamed, e, &split[0]);
		/* ... the trailing part is also still mine ... */
		add_blame_entry(unblamed, &split[2]);
		/* ... and the middle part goes to the parent. */
		add_blame_entry(blamed, &split[1]);
	} else if (!has_head && !has_tail) {
		/*
		 * The parent covers the entire area; e's storage is
		 * reused to hold the parent's part.
		 */
		dup_entry(blamed, e, &split[1]);
	} else if (has_head) {
		/* Mine first, then the parent's part. */
		dup_entry(unblamed, e, &split[0]);
		add_blame_entry(blamed, &split[1]);
	} else {
		/* Parent's part first, then mine. */
		dup_entry(blamed, e, &split[1]);
		add_blame_entry(unblamed, &split[2]);
	}
}
/*
 * Install the tree entry ce into the index on top of the existing
 * entry old (which may be NULL for a brand-new path).
 *
 * Returns 1 when the merged entry has been queued via do_add_entry(),
 * or -1 (after freeing the duplicated entry) when verify_absent() or
 * verify_uptodate() rejects the update.
 */
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	/* Work on a private copy; `merge` is owned here until handed off. */
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() be called again, this time with
		 *    correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			/* Rejected: release our copy before bailing out. */
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);
	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	/* Ownership of `merge` passes to the index here. */
	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}
/*
 * Clone a queue entry and re-arm the clone's timer event: its absolute
 * expiry becomes `now` plus the event's configured interval.
 * Returns the newly duplicated entry (ownership per dup_entry()).
 */
static QUEUE_ENTRY*
dup_entry_interval(QUEUE_ENTRY* entry, FLOATVAL now)
{
	QUEUE_ENTRY *copy = dup_entry(entry);
	parrot_event *ev = copy->data;

	ev->u.timer_event.abs_time = now + ev->u.timer_event.interval;
	return copy;
}
/*
 * Broadcast the event carried by `entry` to the interpreters.
 *
 * SIGINT is duplicated to every secondary interpreter and the original
 * entry is handed to the main interpreter (index 0); any other payload
 * is freed here.  Ownership of `entry`/`event` therefore either moves
 * to the per-interpreter queues or ends in this function.
 */
void
Parrot_schedule_broadcast_qentry(QUEUE_ENTRY* entry)
{
    Parrot_Interp interp;
    parrot_event* event;
    size_t i;

    event = entry->data;
    switch (event->type) {
        case EVENT_TYPE_SIGNAL:
            edebug((stderr, "broadcast signal\n"));
            /*
             * we don't have special signal handlers in usercode yet
             * e.g.:
             * install handler like exception handler *and*
             * set a interpreter flag, that a handler exists
             * we then could examine that flag (after LOCKing it)
             * and dispatch the exception to all interpreters that
             * handle it
             * Finally, we send the first (main) interpreter that signal
             *
             * For now just send to all.
             */
            switch(event->u.signal) {
                case SIGINT:
                    if (n_interpreters) {
                        /* Duplicate the entry for every secondary
                         * interpreter; the array is walked under lock. */
                        LOCK(interpreter_array_mutex);
                        for (i = 1; i < n_interpreters; ++i) {
                            edebug((stderr, "deliver SIGINT to %d\n", i));
                            interp = interpreter_array[i];
                            if (interp)
                                Parrot_schedule_interp_qentry(interp,
                                        dup_entry(entry));
                        }
                        UNLOCK(interpreter_array_mutex);
                    }
                    /* The original (not a copy) goes to the main
                     * interpreter last. */
                    interp = interpreter_array[0];
                    Parrot_schedule_interp_qentry(interp, entry);
                    edebug((stderr, "deliver SIGINT to 0\n"));
                    break;
                default:
                    /* Unhandled signal: drop the event on the floor. */
                    mem_sys_free(entry);
                    mem_sys_free(event);
            }
            break;
        default:
            /* Unknown event type: free it, then raise.
             * NOTE(review): internal_exception() runs after the frees —
             * presumably it does not return; verify it never touches
             * the freed entry/event. */
            mem_sys_free(entry);
            mem_sys_free(event);
            internal_exception(1, "Unknown event to broadcast");
            break;
    }
}
/*
 * Append a heap copy of the split part `src` to the scoreboard's
 * blame list.
 * NOTE(review): the git__malloc() result is used unchecked, exactly as
 * in the original code — on OOM this crashes rather than recovers;
 * the void return type leaves no way to report the failure upward.
 */
static void add_split_copy(git_blame *blame, const git_blame__entry *src)
{
	git_blame__entry *new_entry = git__malloc(sizeof(*new_entry));
	memcpy(new_entry, src, sizeof(*new_entry));
	add_blame_entry(blame, new_entry);
}

/*
 * split_overlap() divided an existing blame e into up to three parts in split.
 * Adjust the linked list of blames in the scoreboard to reflect the split.
 */
static void split_blame(
		git_blame *blame,
		git_blame__entry *split,
		git_blame__entry *e)
{
	if (split[0].suspect && split[2].suspect) {
		/* The first part (reuse storage for the existing entry e) */
		dup_entry(e, &split[0]);

		/* The last part -- me */
		add_split_copy(blame, &split[2]);

		/* ... and the middle part -- parent */
		add_split_copy(blame, &split[1]);
	} else if (!split[0].suspect && !split[2].suspect) {
		/*
		 * The parent covers the entire area; reuse storage for e and
		 * replace it with the parent
		 */
		dup_entry(e, &split[1]);
	} else if (split[0].suspect) {
		/* me and then parent */
		dup_entry(e, &split[0]);
		add_split_copy(blame, &split[1]);
	} else {
		/* parent and then me */
		dup_entry(e, &split[1]);
		add_split_copy(blame, &split[2]);
	}
}
/*
 * Duplicate ce and queue the copy into the resulting index with the
 * given flag bits to set and clear.
 */
static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	struct cache_entry *copy = dup_entry(ce);

	do_add_entry(o, copy, set, clear);
}