edge_ref *delaunay_edges(int nsites)
{
  edge_ref *array, *stack;
  int edges = 0, top = -1;
  unsigned mark = next_mark;

  assert(array = (edge_ref *) malloc((3 * nsites - 5) * sizeof(edge_ref)));
  assert(stack = (edge_ref *) malloc((3 * nsites - 6) * sizeof(edge_ref)));
  if (++next_mark == 0) next_mark = 1;
  stack[++top] = delaunay_build(nsites);
  while (top != -1) {
    edge_ref e = stack[top--];
    while (MARK(e) != mark) {
      MARK(e) = mark;
      array[edges++] = e;
      stack[++top] = ONEXT(e);
      e = ONEXT(SYM(e));
    }
  }
  array[edges] = 0;
  free(stack);
  return array;
}
struct listnode *list_add(t_list list, void *data)
{
  MARK();
  struct listnode *node = (struct listnode *) link_addnew((t_link) list, sizeof(struct listnode));
  MARK();
  node->data = data;
  return node;
}
static PyObject *extract_list(struct pointerlist *root, int values)
{
  PyObject *retval;
  int i;
  unsigned int *kdata;

  import_array();
  MARK();
  XX(print_pointerlist(root));
  if (root->size > 0) {
    quicksort(root->lst, 0, root->size - 1);
  }
  MARK();
  if (values) {
    retval = PyList_New(0);
    for (i = 0; i < root->size; i++) {
      PyList_Append(retval, root->lst[i]->self);
      XX(PyObject_Print(root->lst[i]->self, stdout, 0));
      XX(printf(" at address %p\n", root->lst[i]->self));
    }
    MARK();
    return retval;
  }
  retval = PyArray_FromDims(1, (int *) &(root->size), PyArray_INT);
  kdata = (unsigned int *) ((PyArrayObject *) retval)->data;
  for (i = 0; i < root->size; i++) {
    kdata[i] = root->lst[i]->key;
  }
  MARK();
  return PyArray_Return((PyArrayObject *) retval);
}
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. Your threadfn() must not call do_exit()
 * itself if you use this function! This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
  int ret;

  mutex_lock(&kthread_stop_lock);

  /* It could exit after stop_info.k set, but before wake_up_process. */
  get_task_struct(k);

  MARK(kernel_kthread_stop, "%d", k->pid);

  /* Must init completion *before* thread sees kthread_stop_info.k */
  init_completion(&kthread_stop_info.done);
  smp_wmb();

  /* Now set kthread_should_stop() to true, and wake it up. */
  kthread_stop_info.k = k;
  wake_up_process(k);
  put_task_struct(k);

  /* Once it dies, reset stop ptr, gather result and we're done. */
  wait_for_completion(&kthread_stop_info.done);
  kthread_stop_info.k = NULL;
  ret = kthread_stop_info.err;
  mutex_unlock(&kthread_stop_lock);

  MARK(kernel_kthread_stop_ret, "%d", ret);

  return ret;
}
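/*
 * A minimal usage sketch (not part of the function above): a threadfn that
 * cooperates with kthread_stop() by polling kthread_should_stop(). The
 * names my_thread_fn and my_task are illustrative assumptions only.
 */
#include <linux/kthread.h>
#include <linux/delay.h>

static int my_thread_fn(void *data)
{
  while (!kthread_should_stop()) {
    /* do one unit of work, then yield the CPU for a while */
    msleep(100);
  }
  return 0;  /* this value becomes the return value of kthread_stop() */
}

/* Caller side (error handling omitted):
 *   struct task_struct *my_task = kthread_create(my_thread_fn, NULL, "my_thread");
 *   wake_up_process(my_task);
 *   ...
 *   int err = kthread_stop(my_task);
 */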
int main(int argc)
{
  if (argc < 2) p = true;
  BM bm;
  bm.generate();
  aos::fill(aos::a, bm.values0, 1024);
  aos::fill(aos::b, bm.values1, 1024);
  aos::fill(aos::c, bm.values2, 1024);
  aosP::fill(aosP::a, bm.values0, 1024);
  aosP::fill(aosP::b, bm.values1, 1024);
  aosP::fill(aosP::c, bm.values2, 1024);
  memcpy(bm.values0, soa4::m1, 4 * 4096);
  memcpy(bm.values1, soa4::m2, 4 * 4096);
  memcpy(bm.values2, soa4::m3, 4 * 4096);
  memcpy(bm.values0, soa3::m1, 12 * 1096);
  memcpy(bm.values1, soa3::m2, 12 * 1096);
  memcpy(bm.values2, soa3::m3, 12 * 1096);
  memcpy(bm.values0, soaP::m1, 4 * 4096);
  memcpy(bm.values1, soaP::m2, 4 * 4096);
  memcpy(bm.values2, soaP::m3, 4 * 4096);
  MARK(bm, aosTest);
  MARK(bm, parTest);
  MARK(bm, soa4Test);
  MARK(bm, soa3Test);
  MARK(bm, soaPTest);
  return 0;
}
static int dfs(Agnode_t *n, Agedge_t *link, int warn)
{
  Agedge_t *e;
  Agedge_t *f;
  Agraph_t *g = agrootof(n);

  MARK(n) = 1;
  for (e = agfstin(g, n); e; e = f) {
    f = agnxtin(g, e);
    if (e == link) continue;
    if (MARK(agtail(e)))
      agdelete(g, e);
  }
  for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
    if (MARK(aghead(e))) {
      if (!warn) {
        warn++;
        fprintf(stderr,
                "warning: %s has cycle(s), transitive reduction not unique\n",
                agnameof(g));
        fprintf(stderr, "cycle involves edge %s -> %s\n",
                agnameof(agtail(e)), agnameof(aghead(e)));
      }
    } else
      warn = dfs(aghead(e), AGOUT2IN(e), warn);
  }
  MARK(n) = 0;
  return warn;
}
int find_first_step(sh_int src, sh_int target, int stay_zone)
{
  int curr_dir;
  sh_int curr_room;
  int src_zone = ((src - (src % 100)) / 100);
  int target_zone = ((target - (target % 100)) / 100);

  if (src < 0 || src > top_of_world || target < 0 || target > top_of_world) {
    stderr_log("Illegal value passed to find_first_step (graph.c)");
    return BFS_ERROR;
  }
  /* dez 19980805
  if ((src_zone != target_zone && stay_zone == 1) || stay_zone == 2) {
    return BFS_NO_PATH;
  }
  */
  if (src_zone != target_zone && stay_zone == 1) {
    return BFS_NO_PATH;
  }
  if (src == target) {
    return BFS_ALREADY_THERE;
  }

  /* clear marks first */
  for (curr_room = 0; curr_room <= top_of_world; curr_room++) {
    UNMARK(curr_room);
  }
  MARK(src);

  /* first, enqueue the first steps, saving which direction we're going. */
  for (curr_dir = 0; curr_dir < NUM_OF_DIRS; curr_dir++) {
    if (VALID_EDGE(src, curr_dir)) {
      MARK(TOROOM(src, curr_dir));
      bfs_enqueue(TOROOM(src, curr_dir), curr_dir);
    }
  }

  /* now, do the classic BFS. */
  while (queue_head) {
    if (queue_head->room == target) {
      curr_dir = queue_head->dir;
      bfs_clear_queue();
      return curr_dir;
    } else {
      for (curr_dir = 0; curr_dir < NUM_OF_DIRS; curr_dir++) {
        if (VALID_EDGE(queue_head->room, curr_dir)) {
          MARK(TOROOM(queue_head->room, curr_dir));
          bfs_enqueue(TOROOM(queue_head->room, curr_dir), queue_head->dir);
        }
      }
      bfs_dequeue();
    }
  }
  return BFS_NO_PATH;
}
void ComplexObject_mark(YoyoObject *ptr)
{
  ptr->marked = true;
  ComplexObject *obj = (ComplexObject *) ptr;

  MARK(obj->base);
  for (size_t i = 0; i < obj->mixin_count; i++) {
    MARK(obj->mixins[i]);
  }
}
/* ********************************************************************
   This routine sorts a[0] ... a[n-1] using the fact that in their
   common prefix, after offset characters, there is a suffix whose
   rank is known. In this routine we call this suffix anchor (and we
   denote its position and rank with anchor_pos and anchor_rank
   respectively) but it is not necessarily an anchor (= it does not
   necessarily start at a position multiple of Anchor_dist) since this
   function is called by pseudo_anchor_sort().
   The routine works by scanning the suffixes before and after the
   anchor in order to find (and mark) those which are suffixes of
   a[0] ... a[n-1]. After that, the ordering of a[0] ... a[n-1] is
   derived with a single scan of the marked suffixes.
   ******************************************************************** */
static void general_anchor_sort(Int32 *a, Int32 n,
                                Int32 anchor_pos, Int32 anchor_rank, Int32 offset)
{
  int integer_cmp(const void *, const void *);
  Int32 sb, lo, hi;
  Int32 curr_lo, curr_hi, to_be_found, i, j;
  Int32 item;
  void *ris;

  assert(Sa[anchor_rank] == anchor_pos);
  /* ---------- get bucket of anchor ---------- */
  sb = Get_small_bucket(anchor_pos);
  lo = BUCKET_FIRST(sb);
  hi = BUCKET_LAST(sb);
  assert(sb == Get_small_bucket(a[0] + offset));
  // ------ sort pointers a[0] ... a[n-1] as plain integers
  qsort(a, n, sizeof(Int32), integer_cmp);

  // ------------------------------------------------------------------
  // now we scan the bucket containing the anchor in search of suffixes
  // corresponding to the ones we have to sort. When we find one such
  // suffix we mark it. We go on until n sfx's have been marked
  // ------------------------------------------------------------------
  curr_hi = curr_lo = anchor_rank;

  // the anchor must correspond to a suffix to be sorted
#if DEBUG
  item = anchor_pos - offset;
  assert(bsearch(&item, a, n, sizeof(Int32), integer_cmp));
#endif

  MARK(curr_lo);
  // scan suffixes preceding and following the anchor
  for (to_be_found = n - 1; to_be_found > 0; ) {
    // invariant: the next positions to check are curr_lo-1 and curr_hi+1
    assert(curr_lo > lo || curr_hi < hi);
    while (curr_lo > lo) {
      item = Sa[--curr_lo] - offset;
      ris = bsearch(&item, a, n, sizeof(Int32), integer_cmp);
      if (ris) { MARK(curr_lo); to_be_found--; }
      else break;
    }
    while (curr_hi < hi) {
      item = Sa[++curr_hi] - offset;
      ris = bsearch(&item, a, n, sizeof(Int32), integer_cmp);
      if (ris) { MARK(curr_hi); to_be_found--; }
      else break;
    }
  }
  // sort a[] using the marked suffixes
  for (j = 0, i = curr_lo; i <= curr_hi; i++)
    if (ISMARKED(i)) {
      UNMARK(i);
      a[j++] = Sa[i] - offset;
    }
  assert(j == n);  // make sure n items have been sorted
}
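/*
 * A minimal sketch of the comparator referenced above. Assumption: the
 * actual integer_cmp is defined elsewhere in the source as the usual
 * qsort() comparator over Int32 values, sorting in ascending order.
 */
int integer_cmp(const void *a, const void *b)
{
  return *((Int32 *) a) - *((Int32 *) b);
}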
static inline int add (struct S *a, struct S *b, int c)
{
  *a->x += *b->x;
  a->y += b->y;
l1:
  MARK (add_l1);
  u[c + 0]++;
  a = (struct S *) 0;
  u[c + 1]++;
  a = b;
l2:
  MARK (add_l2);
  u[c + 2]++;
  return *a->x + *b->x + a->y + b->y;
}
static int bar (int i)
{
  int j = i;
  int k;
  struct S p[2] = { { &i, i * 2 }, { &j, j * 2 } };
l1:
  MARK (bar_l1);
  k = add (&p[0], &p[1], 0);
l2:
  MARK (bar_l2);
  p[0].x = &j;
  p[1].x = &i;
  k += add (&p[0], &p[1], 3);
l3:
  MARK (bar_l3);
  return i + j + k;
}
static Block *new_block (int digits)
  /* Allocate a new block. */
{
  Block *b;

  b = MALLOC (Block, 1);
  MARK (b, lia_magic);
  b->storage = MALLOC (Lia, digits);
  /* >>> BZERO (b->storage, Lia, digits);
   *     not needed? */
  MARK (b->storage, lia_magic);
  b->first = 0;
  b->last = digits - 1;
  b->next = NULL;
  return (b);
}
Lia_pool_adt lia_pool_open (int block_size)
  /* Opens a Lia pool and returns a pointer (ADT) to it.
     A "Lia pool" is essentially a list of blocks of n == block_size Lia
     digits. This list provides memory space for lia_pool_store().
     Initially, there is only one block allocated. If this block gets
     filled up, a new one is allocated. */
{
  Pool *p;

  p = MALLOC (Pool, 1);
  MARK (p, lia_magic);
  p->magic = Magic_number;
  p->block_digits = Max (block_size, MIN_BLOCK_SIZE) * lia_common.max;
  p->block_list = new_block (p->block_digits);
  p->longs = 0;
  p->digits = 0;
  if (is_virgin) {
    lia_load (a_zero, 0);
    is_virgin = FALSE;
  }
#ifdef DO_TYPECHECKING
  minp = (Pool *) Min (minp, p);
  maxp = (Pool *) Max (maxp, p);
#endif
  return ((Lia_pool_adt) p);
}
int l_remove(node_t *head, key_t key)
{
  node_t *pred, *item, *sitem;

  while (TRUE) {
    if (!l_find(&pred, &item, head, key)) {
      trace("remove item failed %d\n", key);
      return FALSE;
    }
    sitem = STRIP_MARK(item);
    node_t *inext = sitem->next;
    /* mark the node first, then unlink it */
    if (!CAS(&sitem->next, inext, MARK(inext))) {
      trace("cas item %p mark failed\n", sitem->next);
      continue;
    }
    sitem->val = NULL_VALUE;
    int tag = GET_TAG(pred->next) + 1;
    if (CAS(&pred->next, item, TAG(STRIP_MARK(sitem->next), tag))) {
      trace("remove item %p success\n", item);
      haz_defer_free(sitem);
      return TRUE;
    }
    trace("cas item remove item %p failed\n", item);
  }
  return FALSE;
}
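/*
 * A minimal sketch (not the definitions used above) of the common low-bit
 * tagging scheme behind MARK/STRIP_MARK in lock-free lists: the pointer's
 * least significant bit flags a logically deleted node. The real macros,
 * together with the TAG/GET_TAG versioning used above, are defined
 * elsewhere and may pack their bits differently.
 */
#include <stdint.h>

#define DELETED_BIT          ((uintptr_t) 1)
#define MARK_SKETCH(p)       ((void *) ((uintptr_t) (p) | DELETED_BIT))
#define STRIP_MARK_SKETCH(p) ((void *) ((uintptr_t) (p) & ~DELETED_BIT))
#define HAS_MARK_SKETCH(p)   (((uintptr_t) (p) & DELETED_BIT) != 0)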
void FXRbId::markfunc(FXId *self)
{
  FXRbObject::markfunc(self);
  if (self) {
    FXRbGcMark(self->getApp());
    if (self->getUserData()) MARK(self->getUserData());
  }
}
static int gfs_fill_super_info(struct gfs_super_info *si, u32 ino)
{
  struct gfs_inode_info *inode;
  struct gfs_super *raw;
  struct buffer_head *bh;
  int err;

  mutex_init(&si->s_mutex);
  inode = gfs_iget_info(si->s_vfs_sb, ino);
  PDEBUG("ino=%d PTR_ERR=%li\n", (int) ino, (long) PTR_ERR(inode));
  if (IS_ERR(inode))
    return PTR_ERR(inode);
  si->s_super_inode = inode;
  bh = get_inode_data(inode, 0);
  if (!bh)
    return -EIO;
  MARK();
  raw = (struct gfs_super *) (bh->b_data + sizeof(struct gfs_inode));
  err = init_super_from_raw(si, raw);
  put_inode_data(inode, 0);
  return err;
}
void remove_child(Agraph_t *graph, Agnode_t *node)
{
  Agedge_t *edge;
  Agedge_t *nexte;

  /* Avoid cycles */
  if MARKED(node)
    return;
  MARK(node);

  /* Skip nodes with more than one parent */
  edge = agfstin(node);
  if (edge && (agnxtin(edge) != NULL)) {
    UNMARK(node);
    return;
  }

  /* recursively remove children */
  for (edge = agfstout(node); edge; edge = nexte) {
    nexte = agnxtout(edge);
    if (aghead(edge) != node) {
      if (verbose)
        fprintf(stderr, "Processing descendant: %s\n", agnameof(aghead(edge)));
      remove_child(graph, aghead(edge));
      agdeledge(edge);
    }
  }

  agdelnode(node);
  return;
}
struct linknode *link_addnew(t_link link, int size)
{
  MARK();
  struct linknode *node = (struct linknode *) MALLOC(size);
  MARK();
  D(" l:%p\n", link);
  D(" l->p:%p\n", link->prev);
  node->prev = link->prev;
  MARK();
  link->prev->next = node;
  MARK();
  node->next = link;
  link->prev = node;
  MARK();
  return node;
}
bool bar(int x)
{
  ENTER();
  if ((x < 1)) {
    MARK();
    { EXIT(); return true; }
    RELEASE();
  } else {
    MARK();
    { EXIT(); return false; }
    RELEASE();
  }
  EXIT();
}
EXPORT void plugin_setup(void)
{
  static int i = 0;

  MARK();
  printf("plugin_setup: %d\n", i);
  host_callback(i);
  i++;
}
void FXRbHeaderItem::markfunc(FXHeaderItem *self)
{
  FXRbObject::markfunc(self);
  if (self) {
    FXRbGcMark(self->getIcon());
    if (self->getData()) MARK(self->getData());
  }
}
static void dfs(Agraph_t *g, Agnode_t *n, Agraph_t *out, char *marks)
{
  Agedge_t *e;
  Agnode_t *other;

  MARK(n) = 1;
#ifndef WITH_CGRAPH
  aginsert(out, n);
#else /* WITH_CGRAPH */
  agsubnode(out, n, 1);
#endif /* WITH_CGRAPH */
  for (e = agfstedge(g, n); e; e = agnxtedge(g, e, n)) {
    if ((other = agtail(e)) == n)
      other = aghead(e);
    if (!MARK(other))
      dfs(g, other, out, marks);
  }
}
void FXRbIconItem::markfunc(FXIconItem *self)
{
  FXRbObject::markfunc(self);
  if (self) {
    FXRbGcMark(self->getBigIcon());
    FXRbGcMark(self->getMiniIcon());
    if (self->getData()) MARK(self->getData());
  }
}
void LoopReductor::depthFirstSearch(BasicBlock *bb, Vector<BasicBlock*> *ancestors)
{
  ancestors->push(bb);
  MARK(bb) = true;

  SortedSLList<Edge*, EdgeDestOrder> successors;
  for (BasicBlock::OutIterator edge(bb); edge; edge++)
    successors.add(*edge);

  for (SortedSLList<Edge*, EdgeDestOrder>::Iterator edge(successors); edge; edge++) {
    if ((edge->kind() != Edge::CALL) && !edge->target()->isExit()) {
      if (MARK(edge->target()) == false) {
        depthFirstSearch(edge->target(), ancestors);
      } else {
        if (ancestors->contains(edge->target())) {
          LOOP_HEADER(edge->target()) = true;
          BACK_EDGE(edge) = true;
          bool inloop = false;
          for (Vector<BasicBlock*>::Iterator member(*ancestors); member; member++) {
            if (*member == edge->target())
              inloop = true;
            if (inloop) {
              IN_LOOPS(member)->add(edge->target()->number());
            }
          }
        }
        /* Ugly and not efficient, but good enough for now. */
        for (dfa::BitSet::Iterator bit(**IN_LOOPS(edge->target())); bit; bit++) {
          bool inloop = false;
          for (Vector<BasicBlock*>::Iterator member(*ancestors); member; member++) {
            if (member->number() == *bit)
              inloop = true;
            if (inloop) {
              IN_LOOPS(member)->add(*bit);
            }
          }
        }
      }
    }
  }
  ancestors->pop();
}
AMBIX_EXPORT void ambix_info_setup(void)
{
  ambix_info_class = class_new(gensym("ambix_info"),
                               (t_newmethod) ambix_info_new,
                               (t_method) ambix_info_free,
                               sizeof(t_ambix_info),
                               0,
                               A_GIMME, A_NULL);

  class_addmethod(ambix_info_class, (t_method) ambix_info_open,
                  gensym("open"), A_SYMBOL, A_NULL);
  if (0) {
    MARK("setup");
  }
}
/*
 * find_first_step: given a source room and a target room, find the first
 * step on the shortest path from the source to the target.
 *
 * Intended usage: in mobile_activity, give a mob a dir to go if they're
 * tracking another mob or a PC. Or, a 'track' skill for PCs.
 */
int find_first_step(room_rnum src, room_rnum target)
{
  int curr_dir;
  room_rnum curr_room;

  if (src == NOWHERE || target == NOWHERE ||
      src > top_of_world || target > top_of_world) {
    extended_mudlog(NRM, SYSL_BUGS, TRUE,
                    "Illegal value %d or %d passed to find_first_step. (%s)",
                    src, target, __FILE__);
    return (BFS_ERROR);
  }
  if (src == target)
    return (BFS_ALREADY_THERE);

  /* clear marks first, some OLC systems will save the mark. */
  for (curr_room = 0; curr_room <= top_of_world; curr_room++)
    UNMARK(curr_room);

  MARK(src);

  /* first, enqueue the first steps, saving which direction we're going. */
  for (curr_dir = 0; curr_dir < NUM_OF_DIRS; curr_dir++)
    if (VALID_EDGE(src, curr_dir)) {
      MARK(TOROOM(src, curr_dir));
      bfs_enqueue(TOROOM(src, curr_dir), curr_dir);
    }

  /* now, do the classic BFS. */
  while (queue_head) {
    if (queue_head->room == target) {
      curr_dir = queue_head->dir;
      bfs_clear_queue();
      return (curr_dir);
    } else {
      for (curr_dir = 0; curr_dir < NUM_OF_DIRS; curr_dir++)
        if (VALID_EDGE(queue_head->room, curr_dir)) {
          MARK(TOROOM(queue_head->room, curr_dir));
          bfs_enqueue(TOROOM(queue_head->room, curr_dir), queue_head->dir);
        }
      bfs_dequeue();
    }
  }
  return (BFS_NO_PATH);
}
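/*
 * A minimal sketch of the helper macros the BFS above relies on.
 * Assumption: a CircleMUD-style world[] table where marking a room means
 * setting a scratch bit in its room flags; the actual MARK, UNMARK,
 * TOROOM and VALID_EDGE definitions live elsewhere and may differ.
 */
#define BFS_MARKED_SKETCH(room)   (ROOM_FLAGGED((room), ROOM_BFS_MARK))
#define BFS_MARK_SKETCH(room)     (SET_BIT(ROOM_FLAGS(room), ROOM_BFS_MARK))
#define BFS_UNMARK_SKETCH(room)   (REMOVE_BIT(ROOM_FLAGS(room), ROOM_BFS_MARK))
#define BFS_TOROOM_SKETCH(x, y)   (world[(x)].dir_option[(y)]->to_room)
#define BFS_VALID_EDGE_SKETCH(x, y) \
  (world[(x)].dir_option[(y)] && \
   BFS_TOROOM_SKETCH((x), (y)) != NOWHERE && \
   !BFS_MARKED_SKETCH(BFS_TOROOM_SKETCH((x), (y))))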
void lia_pool_close (Lia_pool_adt pid)
  /* Input/Output: pid. */
  /* Returns all the unused memory of the current block to the system. */
{
  Pool *p = cast (pid);
  Block *b = p->block_list;

  b->last = b->first - 1;
  REALLOC (b->storage, Lia, b->last + 1);
  MARK (b->storage, lia_magic);
}
void FXRbComboBox::markfunc(FXComboBox *self)
{
  FXRbPacker::markfunc(self);
  if (self) {
    FXRbGcMark(self->getFont());
    for (FXint i = 0; i < self->getNumItems(); i++) {
      if (self->getItemData(i)) MARK(self->getItemData(i));
    }
  }
}
void DefaultArray_mark(YoyoObject *ptr)
{
  ptr->marked = true;
  DefaultArray *arr = (DefaultArray *) ptr;

  MUTEX_LOCK(&arr->access_mutex);
  for (size_t i = 0; i < arr->size; i++) {
    if (arr->array[i] != NULL)
      MARK(arr->array[i]);
  }
  MUTEX_UNLOCK(&arr->access_mutex);
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq: the interrupt number
 * @regs: pointer to a register structure
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
                             struct irqaction *action)
{
  irqreturn_t ret, retval = IRQ_NONE;
  unsigned int status = 0;

  MARK(kernel_irq_entry, "%u %u", irq, (regs) ? (!user_mode(regs)) : (1));

  handle_dynamic_tick(action);

  /*
   * Unconditionally enable interrupts for threaded
   * IRQ handlers:
   */
  if (!hardirq_count() || !(action->flags & IRQF_DISABLED))
    local_irq_enable();

  do {
    unsigned int preempt_count = preempt_count();

    ret = action->handler(irq, action->dev_id, regs);
    if (preempt_count() != preempt_count) {
      stop_trace();
      print_symbol("BUG: unbalanced irq-handler preempt count in %s!\n",
                   (unsigned long) action->handler);
      printk("entered with %08x, exited with %08x.\n",
             preempt_count, preempt_count());
      dump_stack();
      preempt_count() = preempt_count;
    }
    if (ret == IRQ_HANDLED)
      status |= action->flags;
    retval |= ret;
    action = action->next;
  } while (action);

  if (status & IRQF_SAMPLE_RANDOM) {
    local_irq_enable();
    add_interrupt_randomness(irq);
  }
  local_irq_disable();

  MARK(kernel_irq_exit, MARK_NOARGS);

  return retval;
}