/**
 * @brief Output moves of an id in a file
 *
 * @return 0 If output was NULL
 * @return 1 Otherwise
 */
int identifier_moves_log(Identifier data, FILE *output)
{
    if (output == NULL)
        return 0;

    Stack s, s2;
    stack_alloc(&s);
    identifier_to_stack(data, &s);
    stack_alloc(&s2);
    identifier_to_stack(data, &s2);

    char strmove[5];
    int move = 0, a, b, c, d;

    fprintf(output, "MOVES : ");
    while ((move = stack_pop(&s)) != -1) {
        stack_expand(&a, &b, &c, &d, move);
        strmove[0] = a + 'a';
        strmove[1] = b + '1';
        strmove[2] = c + 'a';
        strmove[3] = d + '1';
        strmove[4] = '\0';
        fprintf(output, "%s ", strmove);
    }
    fprintf(output, "<-> ");
    while ((move = stack_pop(&s2)) != -1) {
        fprintf(output, "%d ", move);
    }
    fprintf(output, "\n");

    stack_free(&s2);
    stack_free(&s);
    return 1;
}
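/*
 * Hedged worked example, not from the original source: assuming
 * stack_expand() splits an encoded move into board coordinates
 * (a,b) -> (c,d) by its decimal digits, a move such as 4142 would expand
 * to a=4, b=1, c=4, d=2 and print as "e2e3" ('a'+4 = 'e', '1'+1 = '2',
 * '1'+2 = '3'). The raw values after "<->" are the same moves before
 * expansion; test_node_extract() below pushes moves in this 4142-style
 * encoding, which is where the assumption comes from.
 */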
void init_scheduler()
{
    if (num_Thread == 0) {
        th_t *mainThread, *scheduleThread;

        if ((mainThread = thread_alloc()) == NULL)
            abort();
        if ((scheduleThread = thread_alloc()) == NULL)
            abort();

        init_sigaction();

        if ((ready_queueHead = queueHead_alloc()) == NULL)
            abort();
        if ((sched_queueHead = queueHead_alloc()) == NULL)
            abort();
        th_queue_init(ready_queueHead);
        th_queue_init(sched_queueHead);
        //th_queue_init(&kernel_queue);

        mainThread->mctx.status = TH_WAITING;
        mainThread->mctx.stackAddr = NULL;
        mainThread->tid = num_Thread++;

        scheduleThread->mctx.status = TH_SCHED;
        //scheduleThread->tid = num_kernel_thread++;
        main_kernel_id = scheduleThread->tid = getpid();
        num_kernel_thread++;

        if ((scheduleThread->mctx.stackAddr = stack_alloc()) == NULL)
            abort();
        if ((mainThread->mctx.stackAddr = stack_alloc()) == NULL)
            abort();

        //create machine context
        void (*fnptr)(void*) = (void(*)(void*))scheduler;
        mctx_create(&(scheduleThread->mctx), fnptr, NULL,
                    scheduleThread->mctx.stackAddr, STACK_SIZE);
        // mctx_create(&mctx_list[0],fnptr,NULL,mctx_list[0].stackAddr,STACK_SIZE);

        th_queue_insert(sched_queueHead, PRIORITY_SCHEDULER, scheduleThread);
        th_queue_insert(ready_queueHead, PRIORITY_NORMAL, mainThread);

        init_sigaction();
        init_mutex();
        //th_queue_insert(&kernel_queue, PRIORITY_SCHEDULER, scheduleThread);

        switch_to_scheduler(); //jump to scheduler
    }
}
/*
 * thread_stack_daemon:
 *
 * Perform stack allocation as required due to
 * invoke failures.
 */
static void thread_stack_daemon(void)
{
    thread_t thread;

    simple_lock(&thread_stack_lock);

    while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
        simple_unlock(&thread_stack_lock);

        stack_alloc(thread);

        (void)splsched();
        thread_lock(thread);
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
        thread_unlock(thread);
        (void)spllo();

        simple_lock(&thread_stack_lock);
    }

    assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
    simple_unlock(&thread_stack_lock);

    thread_block((thread_continue_t)thread_stack_daemon);
    /*NOTREACHED*/
}
static int _kthread_create(kthread_t *thread,
                           int gfp_flags,
                           void *(*start_routine)(void *),
                           void *args)
{
    *thread = _kmalloc_kthread();
    if (*thread) {
        if (stack_alloc(&((*thread)->stack), 1024, gfp_flags)) {
            void *stack_p = stack_top(&((*thread)->stack));
            cpu_state_build(&((*thread)->cpu_state),
                            start_routine, args,
                            (void *)(stack_p), &_exited_kthread);
        } else {
            _free_kthread(*thread);
            *thread = NULL;
            return -1;
        }
    }
    return 0;
}
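/*
 * Hedged usage sketch, not from the original source: _kthread_create()
 * returns 0 both on success and when the initial TCB allocation fails (in
 * which case *thread is NULL), so a caller must check the out-parameter as
 * well as the return value. worker() and spawn_worker() are illustrative
 * names; the gfp flag value is project-specific.
 */
static void *worker(void *args)
{
    /* ... thread body; returns into the _exited_kthread trampoline wired
     * up by cpu_state_build() above ... */
    return NULL;
}

static int spawn_worker(kthread_t *out)
{
    if (_kthread_create(out, 0 /* gfp flags, project-specific */, worker, NULL) != 0)
        return -1;                     /* stack allocation failed */
    return (*out != NULL) ? 0 : -1;    /* TCB allocation failed */
}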
/*
 * thread_doswapin:
 *
 * Swapin the specified thread, if it should be runnable, then put
 * it on a run queue. No locks should be held on entry, as it is
 * likely that this routine will sleep (waiting for stack allocation).
 */
kern_return_t thread_doswapin(thread_t thread)
{
    kern_return_t kr;
    spl_t s;

    /*
     * Allocate the kernel stack.
     */
    kr = stack_alloc(thread, thread_continue);
    if (kr != KERN_SUCCESS)
        return kr;

    /*
     * Place on run queue.
     */
    s = splsched();
    thread_lock(thread);
    thread->state &= ~(TH_SWAPPED | TH_SW_COMING_IN);
    if (thread->state & TH_RUN)
        thread_setrun(thread, TRUE);
    thread_unlock(thread);
    (void) splx(s);

    return KERN_SUCCESS;
}
/* Create a new thread. It should be passed "fcn", a function which
 * takes two arguments, (the second one is a dummy, always 4). The
 * first argument is passed in "arg". Returns the TID of the new
 * thread */
static pid_t create_thread(int (*fcn)(void *), void *arg, void **stack)
{
    pid_t newpid;
    int flags;
    void *my_stack;

    my_stack = stack_alloc(THREAD_STACK_SIZE);

    /* need SIGCHLD so parent will get that signal when child dies,
     * else have errors doing a wait */
    flags = SIGCHLD | CLONE_THREAD | CLONE_VM |
        /* CLONE_THREAD => no signal to parent on termination; have to use
         * CLONE_CHILD_CLEARTID to get that. Since we're using library call
         * instead of raw system call we don't have child_tidptr argument,
         * so we set the location in the child itself via set_tid_address(). */
        CLONE_CHILD_CLEARTID |
        CLONE_FS | CLONE_FILES | CLONE_SIGHAND;
    newpid = clone(fcn, my_stack, flags, arg);
    /* this is really a tid since we passed CLONE_THREAD: child has same pid as us */
    if (newpid == -1) {
        fprintf(stderr, "smp.c: Error calling clone\n");
        stack_free(my_stack, THREAD_STACK_SIZE);
        return -1;
    }
    *stack = my_stack;
    return newpid;
}
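/*
 * Hedged usage sketch, not from the original source: a caller hands
 * create_thread() a worker routine and later reclaims the stack returned
 * through the out-parameter. The worker signature and the dummy second
 * argument convention are taken from the comment above; run_worker() and
 * the join strategy are assumed for illustration.
 */
static int worker_fn(void *arg)
{
    /* ... do work with arg; the "second argument" mentioned above is a
     * calling-convention dummy and is not declared here ... */
    return 0;
}

static void run_worker(void)
{
    void *stack;
    pid_t tid = create_thread(worker_fn, NULL, &stack);
    if (tid != -1) {
        /* ... wait for the child to exit (e.g. futex-wait on the
         * CLONE_CHILD_CLEARTID location) before freeing its stack ... */
        stack_free(stack, THREAD_STACK_SIZE);
    }
}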
/* Create a new thread. It should be passed "fcn", a function which
 * takes two arguments, (the second one is a dummy, always 4). The
 * first argument is passed in "arg". Returns the PID of the new
 * thread */
static pid_t create_thread(int (*fcn)(void *), void *arg, void **stack)
{
    pid_t newpid;
    int flags;
    void *my_stack;

    my_stack = stack_alloc(THREAD_STACK_SIZE);

    /* need SIGCHLD so parent will get that signal when child dies,
     * else have errors doing a wait */
    /* we're not doing CLONE_THREAD => child has its own pid
     * (the thread.c test tests CLONE_THREAD) */
    flags = (SIGCHLD | CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
    newpid = clone(fcn, my_stack, flags, arg, &p_tid, NULL, &c_tid);
    if (newpid == -1) {
        print("smp.c: Error calling clone\n");
        stack_free(my_stack, THREAD_STACK_SIZE);
        return -1;
    }
    *stack = my_stack;
    return newpid;
}
static inline struct imm_value *interpret_dim(struct ast_entry *n)
{
    struct ast_entry *e = n->child;
    struct imm_value *v, *l;

    l = stack_lookup_var(e->val->data.s);

    if (l && l->type != type_a_int) {
        printf("%s Already exists as a different type!\n", e->val->data.s);
        exit(1);
    }

    if (!l)
        l = stack_alloc(e->val->data.s);
    else {
        if (l->size != -1) {
            printf("%s Already dimensioned\n", e->val->data.s);
            exit(1);
        }
    }

    call_eval(v, e->child);

    /* FIXME: bounds checking! */

    l->type = type_a_int; // FIXME: types!!! v->type;

    l->data.ip = malloc(v->data.i * sizeof(int));
    l->size = v->data.i * sizeof(int);

    return l;
}
GAME *clone_game(GAME *source)
{
    GAME *target = stack_alloc(&game_stack, sizeof(GAME));
    memcpy(target, source, sizeof(GAME));
    return target;
}
/* Create a new thread. It should be passed "fcn", a function which
 * takes two arguments, (the second one is a dummy, always 4). The
 * first argument is passed in "arg". Returns the TID of the new
 * thread */
static pid_t create_thread(int (*fcn)(void *), void *arg, void **stack, bool same_group)
{
    pid_t newpid;
    int flags;
    void *my_stack;

    my_stack = stack_alloc(THREAD_STACK_SIZE);

    /* need SIGCHLD so parent will get that signal when child dies,
     * else have errors doing a wait */
    flags = SIGCHLD | CLONE_VM |
        /* CLONE_THREAD => no signal to parent on termination; have to use
         * CLONE_CHILD_CLEARTID to get that. Since we're using library call
         * instead of raw system call we don't have child_tidptr argument,
         * so we set the location in the child itself via set_tid_address(). */
        CLONE_CHILD_CLEARTID |
        CLONE_FS | CLONE_FILES | CLONE_SIGHAND;
    if (same_group)
        flags |= CLONE_THREAD;
    /* XXX: Using libc clone in the child here really worries me, but it seems
     * to work. My theory is that the parent has to call clone, which invokes
     * the loader to fill in the PLT entry, so when the child calls clone it
     * doesn't go into the loader and avoiding the races like we saw in i#500.
     */
    newpid = clone(fcn, my_stack, flags, arg);
    /* this is really a tid if we passed CLONE_THREAD: child has same pid as us */
    if (newpid == -1) {
        nolibc_print("smp.c: Error calling clone\n");
        stack_free(my_stack, THREAD_STACK_SIZE);
        return -1;
    }
    *stack = my_stack;
    return newpid;
}
/*
 * kernel_thread_create:
 *
 * Create a thread in the kernel task
 * to execute in kernel context.
 */
kern_return_t kernel_thread_create(
    thread_continue_t  continuation,
    void               *parameter,
    integer_t          priority,
    thread_t           *new_thread)
{
    kern_return_t result;
    thread_t thread;
    task_t task = kernel_task;

    result = thread_create_internal(task, priority, continuation,
                                    TH_OPTION_NONE, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    task_unlock(task);
    lck_mtx_unlock(&tasks_threads_lock);

    stack_alloc(thread);
    assert(thread->kernel_stack != 0);
#if CONFIG_EMBEDDED
    if (priority > BASEPRI_KERNEL)
#endif
    thread->reserved_stack = thread->kernel_stack;

    thread->parameter = parameter;

    if (debug_task & 1)
        kprintf("kernel_thread_create: thread = %p continuation = %p\n",
                thread, continuation);
    *new_thread = thread;

    return (result);
}
/* proc_init ----------------------------------------------------------------*/
void proc_init()
{
    int i;
    T_CTSK ctsk;
    void first_task();
    void second_task();
    extern T_TSK tsk[];

    for (i = 0; i <= MAX_TSKID; i++) {
        proc[i].stack = (unsigned long)&proc[i].reg[0];
        proc[i].id = i;
    }

    /* first_task -------------------------------------------------------*/
    ctsk.task = (FP)first_task;
    ctsk.stk = stack_alloc(1024);
    ctsk.stksz = 1024;
    ctsk.itskpri = TMAX_TPRI;
    sys_cre_tsk(0, 1, &ctsk);
    sys_act_tsk(0, 1);
    tsk_stat_change(1, TTS_RUN);
    sched_ins(tsk[1].tskpri, &tsk[1].plink);

    /* second_task ------------------------------------------------------*/
    ctsk.task = (FP)second_task;
    ctsk.stk = stack_alloc(1024);
    ctsk.stksz = 1024;
    ctsk.itskpri = TMAX_TPRI;
    sys_cre_tsk(1, 2, &ctsk);
    sys_act_tsk(1, 2);
    tsk_stat_change(2, TTS_RUN);
    sched_ins(tsk[2].tskpri, &tsk[2].plink);

    /* for first CPU ----------------------------------------------------*/
    current_proc[0] = &proc[1];
    c_tskid[0] = 1;

    /* for second CPU ---------------------------------------------------*/
    current_proc[1] = &proc[2];
    c_tskid[1] = 2;

    /* timer variable initialize ----------------------------------------*/
    timer_return[0] = timer_return[1] = 0;
    clock_tick[0] = clock_tick[1] = 0;
    lost_tick[0] = lost_tick[1] = 0;
}
/* Emit a stack frame marker */
struct imm_value *stack_alloc_frame(void)
{
    struct imm_value *v;

    state.esp = state.stack_p;

    v = stack_alloc(NULL);
    v->type = type_frame;

    return v;
}
void *stack_calloc(size_t size)
{
    void *ptr = stack_alloc(size);
    if (ptr) {
        memset(ptr, 0, size);
    }
    return ptr;
}
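/*
 * Hedged usage sketch, not from the original source: stack_calloc() is a
 * zeroing wrapper over stack_alloc(), so the caller receives memory that
 * is already cleared. The struct below is illustrative only.
 */
struct point { int x, y; };

void stack_calloc_example(void)
{
    struct point *p = stack_calloc(sizeof(*p));
    if (p) {
        /* p->x and p->y are guaranteed to be 0 here */
    }
}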
th_t* init_thread(th_t* thread)
{
    memset(thread, 0, sizeof(th_t));
    if ((thread->mctx.stackAddr = stack_alloc()) == NULL)
        abort();
    return thread;
}
static void* stack_realloc(WasmAllocator* allocator,
                           void* p,
                           size_t size,
                           size_t align,
                           const char* file,
                           int line)
{
    if (!p)
        return stack_alloc(allocator, size, align, file, line);

    WasmStackAllocator* stack_allocator = (WasmStackAllocator*)allocator;

    /* TODO(binji): optimize */
    WasmStackAllocatorChunk* chunk = stack_allocator->last;
    while (chunk) {
        if (allocation_in_chunk(chunk, p))
            break;
        chunk = chunk->prev;
    }
    assert(chunk);

    void* result = stack_alloc(allocator, size, align, NULL, 0);
#if TRACE_ALLOCATOR
    if (file) {
        TRACEF("%s:%d: stack_realloc(%p, %" PRIzd ", align=%" PRIzd ") => %p\n",
               file, line, p, size, align, result);
    }
#endif /* TRACE_ALLOCATOR */

    /* We know that the previously allocated data was at most extending from
     * |p| to the end of the chunk. So we can copy at most that many bytes, or
     * the new size, whichever is less. Use memmove because the regions may be
     * overlapping. */
    size_t old_max_size = (size_t)chunk->end - (size_t)p;
    size_t copy_size = size > old_max_size ? old_max_size : size;
    memmove(result, p, copy_size);

#if WASM_STACK_ALLOCATOR_STATS
    /* count this as a realloc, not an alloc */
    stack_allocator->alloc_count--;
    stack_allocator->realloc_count++;
    stack_allocator->total_alloc_bytes -= size;
    stack_allocator->total_realloc_bytes += size;
#endif /* WASM_STACK_ALLOCATOR_STATS */

    return result;
}
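/*
 * Hedged worked example of the copy bound above, not from the original
 * source: the allocator never tracks the exact old allocation size, only
 * the containing chunk's end. So if |p| sits 16 bytes before its chunk's
 * end, old_max_size is 16; growing to size 64 copies only those 16 bytes
 * into the new allocation, while shrinking to size 8 copies 8.
 */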
void dispatch_to_scheduler(th_t* thread)
{
    int pri = PRIORITY_NORMAL; /* was uninitialized in the original; normal
                                  priority matches init_scheduler()'s insert
                                  of the main thread */
    int currentPri;
    th_queue_t* foundQueueTh;
    int flag = 1;

    if (num_Thread == 0) {
        init_scheduler();
    }

    thread->tid = num_Thread++;
    thread->mctx.status = TH_WAITING;
    th_queue_insert(ready_queueHead, pri, thread);
    printf("*** %d thread insert ***\n", num_Thread);

    //Choose kernel thread to be added
    if (num_kernel_thread < maxKernelThreads) {
        //Create a new kernel thread
        //Create scheduler stack for new kernel thread
        th_t *scheduleThread;
        int pid;

        if ((scheduleThread = thread_alloc()) == NULL)
            abort();
        scheduleThread->mctx.status = TH_SCHED;
        pid = scheduleThread->tid = getpid() + 1;
        scheduleThread->current_tid = 0;
        num_kernel_thread++;
        if ((scheduleThread->mctx.stackAddr = stack_alloc()) == NULL)
            abort();
        th_queue_insert(sched_queueHead, PRIORITY_SCHEDULER, scheduleThread);

        rfork_thread(RFPROC | RFNOTEG | RFMEM, scheduleThread->mctx.stackAddr,
                     (int(*)(void*))start_kernel_thread, pid);
    }
    return;
    /*
    if(pri > currentPri) {
        foundQueueTh->thread->mctx.status = TH_WAITING;
        currentTid = thread->tid;
        thread->mctx.status = TH_RUNNING;
        enable_timer();
        mctx_switch(&(foundQueueTh->thread->mctx), &(thread->mctx));
    }
    else
        switch_to_scheduler();
    */
}
static inline struct imm_value *interpret_local(struct ast_entry *n)
{
    struct ast_entry *e = n->child;
    struct imm_value *l;

    l = stack_alloc(e->val->data.s);

    if (e->id == tokn_array) {
        l->size = -1;
        l->type = type_a_int;
    }
    else if (e->id == tokn_label) {
        ; // Do nothing for now
    }

    return l;
}
int bb_exit(int bid, int line)
{
    int i, vid, off;
    int extra = 0;

    /*
     * this may add to reduce the store operation
     * if (cbbs[bid].exit) goto store;
     * if (cbbs[bid].exit2 == 0 && cbbs[bid].exit1 != 0) {
     *     i = cbbs[bid].exit1;
     *     if (cbbs[
     */
store:
    for (i = 0; i < TREG_CNT; i++) {
        vid = cbbs[bid].env[i].vid;
        if (vid == 0)
            continue;
        vars[vid].rid = 0;
        if (cbbs[bid].env[i].dirty) {
            if (vars[vid].type == IR_TempVar) {
                if (line >= vars[vid].last_used)
                    continue;
                if (!vars[vid].mm_alloc)
                    stack_alloc(vid);
            }
            fr_append(inst_sw, i + TREG_BEG, $fp, vars[vid].off);
            cbbs[bid].env[i].dirty = 0;
        }
    }
    /*
    if (cbbs[bid].exit)
        fr_append(inst_j, func_cnt, 0, (int)"exit");
    */
    return extra; /* the original fell off the end of a non-void function */
}
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
    struct thread_shuttle *thread,
    thread_act_t thr_act,
    void (*start_pos)(void))
{
#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n",
               thread, thr_act, start_pos);
#endif /* MACH_ASSERT */

    thread->kernel_stack = (int)stack_alloc();
    assert(thread->kernel_stack);

    /*
     * Utah code fiddles with pcb here - (we don't need to)
     */
    return (KERN_SUCCESS);
}
static WasmAllocatorMark stack_mark(WasmAllocator* allocator)
{
    WasmStackAllocator* stack_allocator = (WasmStackAllocator*)allocator;

    /* allocate the space for the mark, but copy the current stack state now, so
     * when we reset we reset before the mark was allocated */
    StackAllocatorMark mark;
    mark.chunk = stack_allocator->last;
    mark.chunk_current = mark.chunk->current;
    mark.last_allocation = stack_allocator->last_allocation;

    StackAllocatorMark* allocated_mark = stack_alloc(
        allocator, sizeof(StackAllocatorMark), WASM_DEFAULT_ALIGN, NULL, 0);
#if WASM_STACK_ALLOCATOR_STATS
    /* don't count this allocation */
    stack_allocator->alloc_count--;
    stack_allocator->total_alloc_bytes -= sizeof(StackAllocatorMark);
#endif /* WASM_STACK_ALLOCATOR_STATS */
    *allocated_mark = mark;
    return allocated_mark;
}
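/*
 * Hedged usage sketch, not from the original source: the mark captures the
 * allocator state *before* the mark object itself is allocated, so a later
 * reset to the mark also reclaims the mark's own storage. The reset entry
 * point (stack_reset_to_mark below) is an assumed counterpart from the
 * same allocator; the name and direct call are illustrative.
 */
void mark_reset_example(WasmAllocator* allocator)
{
    WasmAllocatorMark mark = stack_mark(allocator);
    /* ... temporary allocations via stack_alloc()/stack_realloc() ... */
    stack_reset_to_mark(allocator, mark); /* assumed counterpart; frees
                                             everything back to the mark */
}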
static inline struct imm_value *interpret_assign(struct ast_entry *n)
{
    struct ast_entry *e = n->child;
    struct imm_value *v, *l;
    int idx;

    l = stack_lookup_var(e->val->data.s);

    if (e->id == tokn_array) {
        call_eval(v, e->child);
        if (v->type != type_int) {
            printf("Array index must be an integer!\n");
            exit(1);
        }
        idx = v->data.i;
    }

    call_eval(v, e->next);

    if (!l && e->id == tokn_label) {
        l = stack_alloc(e->val->data.s);
        l->type = v->type;
    }
    else if (e->id == tokn_array && (!l || l->size == -1)) {
        printf("Array undimensioned!\n");
        exit(1);
    }

    switch (l->type) {
        case type_int:
            l->data.i = v->data.i;
            break;
        case type_a_int:
            l->data.ip[idx] = v->data.i;
            break;
        default:
            printf("Unhandled type\n");
    }

    return l;
}
thread_port_t tgdb_thread_create(vm_offset_t entry)
{
    kern_return_t rc;
    mach_thread_t th;
    int i;

    for (th = &threads[0], i = 0; i < MAX_THREADS; i++, th++)
        if (th->thread == MACH_PORT_NULL)
            break;

    if ((rc = thread_create(mach_task_self(), &th->thread)) != KERN_SUCCESS) {
        printf("tgdb: thread_create returned %d\n", rc);
        return 0;
    }

    th->stack = stack_alloc(STACK_SIZE);

    set_thread_self(th);
    thread_set_regs(entry, th);

    return th->thread;
}
int main(void)
{
    printf("allocating...\n");
    /*
    void *m = malloc(1024*1024*1024);
    int i;
    void *mas[1000000];
    for (i=0; i<1000000; i++) {
        void *m = malloc(1024*1024);
        mas[i] = m;
    }
    */
    //f();
    stack_alloc();
    printf("done\n");
    return 0;
}
void test_node_extract(void)
{
    Node node;
    node_alloc(&node);

    Stack s;
    stack_alloc(&s);

    node.score = 143;
    stack_push(&s, 4142);
    stack_push(&s, 4243);
    stack_push(&s, 4344);
    stack_push(&s, 4445);
    stack_to_identifier(node.data, s, 111111);
    /* node_print(node); */

    int move, score;
    node_extract(node, &move, &score);
    CU_ASSERT_EQUAL(move, 4142);
    CU_ASSERT_EQUAL(score, 143);

    stack_free(&s);
    node_free(&node);
}
/*
=================
idSurface::Split
=================
*/
int idSurface::Split( const idPlane &plane, const float epsilon, idSurface **front, idSurface **back, int *frontOnPlaneEdges, int *backOnPlaneEdges ) const {
    float *       dists;
    float         f;
    byte *        sides;
    int           counts[3];
    int *         edgeSplitVertex;
    int           numEdgeSplitVertexes;
    int *         vertexRemap[2];
    int           vertexIndexNum[2][2];
    int *         vertexCopyIndex[2];
    int *         indexPtr[2];
    int           indexNum[2];
    int *         index;
    int *         onPlaneEdges[2];
    int           numOnPlaneEdges[2];
    int           maxOnPlaneEdges;
    int           i;
    idSurface *   surface[2];
    idDrawVert    v;

    dists = (float *) stack_alloc( verts.Num() * sizeof( float ) );
    sides = (byte *) stack_alloc( verts.Num() * sizeof( byte ) );

    counts[0] = counts[1] = counts[2] = 0;

    // determine side for each vertex
    for ( i = 0; i < verts.Num(); i++ ) {
        dists[i] = f = plane.Distance( verts[i].xyz );
        if ( f > epsilon ) {
            sides[i] = SIDE_FRONT;
        } else if ( f < -epsilon ) {
            sides[i] = SIDE_BACK;
        } else {
            sides[i] = SIDE_ON;
        }
        counts[sides[i]]++;
    }

    *front = *back = NULL;

    // if coplanar, put on the front side if the normals match
    if ( !counts[SIDE_FRONT] && !counts[SIDE_BACK] ) {
        f = ( verts[indexes[1]].xyz - verts[indexes[0]].xyz ).Cross( verts[indexes[0]].xyz - verts[indexes[2]].xyz ) * plane.Normal();
        if ( FLOATSIGNBITSET( f ) ) {
            *back = new idSurface( *this );
            return SIDE_BACK;
        } else {
            *front = new idSurface( *this );
            return SIDE_FRONT;
        }
    }
    // if nothing at the front of the clipping plane
    if ( !counts[SIDE_FRONT] ) {
        *back = new idSurface( *this );
        return SIDE_BACK;
    }
    // if nothing at the back of the clipping plane
    if ( !counts[SIDE_BACK] ) {
        *front = new idSurface( *this );
        return SIDE_FRONT;
    }

    // allocate front and back surface
    *front = surface[0] = new idSurface();
    *back = surface[1] = new idSurface();

    edgeSplitVertex = (int *) stack_alloc( edges.Num() * sizeof( int ) );
    numEdgeSplitVertexes = 0;

    maxOnPlaneEdges = 4 * counts[SIDE_ON];
    counts[SIDE_FRONT] = counts[SIDE_BACK] = counts[SIDE_ON] = 0;

    // split edges
    for ( i = 0; i < edges.Num(); i++ ) {
        int v0 = edges[i].verts[0];
        int v1 = edges[i].verts[1];
        int sidesOr = ( sides[v0] | sides[v1] );

        // if both vertexes are on the same side or one is on the clipping plane
        if ( !( sides[v0] ^ sides[v1] ) || ( sidesOr & SIDE_ON ) ) {
            edgeSplitVertex[i] = -1;
            counts[sidesOr & SIDE_BACK]++;
            counts[SIDE_ON] += ( sidesOr & SIDE_ON ) >> 1;
        } else {
/// \brief Perform an SVC call to allocate stack for a thread
/// \param thread_p Pointer to the TCB of the thread whose stack is initialized
void os_KernelStackAlloc (os_tcb_p thread_p)
{
    stack_alloc(thread_p);
    return;
}
hdata_t hdata_xml_alloc(hcchar * xml_str, InvokeTickDeclare)
{
    hdata_t data = NULL, data_temp;
    hchar * p = (hchar *)xml_str;
    hstack_t data_stack = stack_alloc();
    hstack_t state_stack = stack_alloc();
    hbuffer_t tag_name_buffer = buffer_alloc(128, 256);
    hbuffer_t content_buffer = buffer_alloc(1024, 1024);
    hbuffer_t attr_name_buffer = buffer_alloc(128, 256);
    hbuffer_t attr_value_buffer = buffer_alloc(128, 256);
    hintptr s;
    hchar str_begin = 0;

    stack_push(state_stack, (hany) 0);
    stack_push(data_stack, hdata_array_alloc(20, 20));

    while (*p != '\0') {
        s = (hintptr)stack_peek(state_stack);
        if (s == 0x00) {
            if (SPACE_CHAR(*p)) {
            }
            else if (*p == '<' && p[1] == '?') {
                stack_push(state_stack, (hany)0x10);
                p++;
            }
            else if (*p == '<' && p[1] == '!') {
                stack_push(state_stack, (hany)0x11);
                p++;
            }
            else if (*p == '<' && p[1] == '/') {
                data_temp = stack_pop(data_stack);
                data = stack_pop(data_stack);
                if (hdata_array_size(&hdata_class, data_temp) == 0) {
                    hdata_object_remove(data, kCHILDS);
                }
                if (buffer_length(content_buffer) > 0) {
                    hdata_object_put(data, kVALUE,
                        hdata_xml_value_parse(buffer_to_str(content_buffer), InvokeTickArg));
                    buffer_clear(content_buffer);
                }
                while (*p != '\0' && *p != '>') {
                    p++;
                }
                if (*p == '\0') {
                    break;
                }
            }
            else if (*p == '<') {
                buffer_clear(content_buffer);
                buffer_clear(tag_name_buffer);
                data = hdata_object_alloc();
                hdata_array_add(stack_peek(data_stack), data);
                stack_push(data_stack, data);
                stack_push(state_stack, (hany)0x20);
            }
            else {
                buffer_append(content_buffer, p, 1);
            }
        }
        else if (s == 0x10) {   // <? ?>
            if (p[0] == '?' && p[1] == '>') {
                p++;
                stack_pop(state_stack);
            }
        }
        else if (s == 0x11) {   // <!
            if (p[0] == '>') {
                stack_pop(state_stack);
            }
        }
        else if (s == 0x20) {   // <tagname
            if (SPACE_CHAR(*p)) {
                hdata_object_put((hdata_t)stack_peek(data_stack), kTAGNAME,
                    hdata_string_alloc(buffer_to_str(tag_name_buffer)));
                buffer_clear(tag_name_buffer);
                stack_pop(state_stack);
                stack_push(state_stack, (hany)0x21);
            }
            else if (*p == '/' && p[1] == '>') {
                hdata_object_put((hdata_t)stack_peek(data_stack), kTAGNAME,
                    hdata_string_alloc(buffer_to_str(tag_name_buffer)));
                buffer_clear(tag_name_buffer);
                stack_pop(data_stack);
                stack_pop(state_stack);
                p++;
            }
            else if (*p == '>') {
                hdata_object_put((hdata_t)stack_peek(data_stack), kTAGNAME,
                    hdata_string_alloc(buffer_to_str(tag_name_buffer)));
                buffer_clear(tag_name_buffer);
                data = hdata_array_alloc(20, 20);
                hdata_object_put(stack_peek(data_stack), kCHILDS, data);
                stack_push(data_stack, data);
                stack_pop(state_stack);
            }
            else {
                buffer_append(tag_name_buffer, p, 1);
            }
        }
        else if (s == 0x21) {   // <tagname attrname=
            if (SPACE_CHAR(*p)) {
            }
            else if (*p == '/' && p[1] == '>') {
                stack_pop(data_stack);
                stack_pop(state_stack);
                p++;
            }
            else if (*p == '>') {
                data = hdata_array_alloc(20, 20);
                hdata_object_put(stack_peek(data_stack), kCHILDS, data);
                stack_push(data_stack, data);
                stack_pop(state_stack);
            }
            else if (*p == '=') {
                stack_push(state_stack, (hany)0x22);
            }
            else {
                buffer_append(attr_name_buffer, p, 1);
            }
        }
        else if (s == 0x22) {   // <tagname attrname=valuebegin
            if (SPACE_CHAR(*p)) {
            }
            else if (*p == '\'') {
                str_begin = '\'';
                stack_pop(state_stack);
                stack_push(state_stack, (hany)0x23);
            }
            else if (*p == '"') {
                str_begin = '"';
                stack_pop(state_stack);
                stack_push(state_stack, (hany)0x23);
            }
            else {
                str_begin = 0;
                stack_pop(state_stack);
                stack_push(state_stack, (hany)0x23);
                buffer_append(attr_value_buffer, p, 1);
            }
        }
        else if (s == 0x23) {   // <tagname attrname=value
            if (str_begin == 0) {
                if (SPACE_CHAR(*p)) {
                    hdata_object_put((hdata_t)stack_peek(data_stack),
                        buffer_to_str(attr_name_buffer),
                        hdata_xml_value_parse(buffer_to_str(attr_value_buffer), InvokeTickArg));
                    buffer_clear(attr_name_buffer);
                    buffer_append_str(attr_name_buffer, "@");
                    buffer_clear(attr_value_buffer);
                    stack_pop(state_stack);
                }
                else {
                    buffer_append(attr_value_buffer, p, 1);
                }
            }
            else {
                if (p[0] == '\\') {
                    if (p[1] == '\\') {
                        buffer_append_str(attr_value_buffer, "\\");
                    }
                    else if (p[1] == 'n') {
                        buffer_append_str(attr_value_buffer, "\n");
                    }
                    else if (p[1] == 'r') {
                        buffer_append_str(attr_value_buffer, "\r");
                    }
                    else if (p[1] == 't') {
                        buffer_append_str(attr_value_buffer, "\t");
                    }
                    else if (p[1] == '"') {
                        buffer_append_str(attr_value_buffer, "\"");
                    }
                    else if (p[1] == '\'') {
                        buffer_append_str(attr_value_buffer, "\'");
                    }
                    else {
                        buffer_append(attr_value_buffer, p + 1, 1);
                    }
                    p++;
                }
                else if (p[0] == str_begin) {
                    hdata_object_put((hdata_t)stack_peek(data_stack),
                        buffer_to_str(attr_name_buffer),
                        hdata_xml_value_parse(buffer_to_str(attr_value_buffer), InvokeTickArg));
                    buffer_clear(attr_name_buffer);
                    buffer_append_str(attr_name_buffer, "@");
                    buffer_clear(attr_value_buffer);
                    stack_pop(state_stack);
                }
                else {
                    buffer_append(attr_value_buffer, p, 1);
                }
            }
        }
        p++;
    }

    data = (hdata_t)stack_pop(data_stack);
    data_temp = (hdata_t)stack_pop(data_stack);
    while (data_temp) {
        hdata_dealloc(data);
        data = data_temp;
        data_temp = (hdata_t)stack_pop(data_stack);
    }

    buffer_dealloc(tag_name_buffer);
    buffer_dealloc(content_buffer);
    buffer_dealloc(attr_name_buffer);
    buffer_dealloc(attr_value_buffer);
    stack_dealloc(data_stack);
    stack_dealloc(state_stack);

    return data;
}
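/*
 * Hedged behavior sketch, not from the original source: the parser above is
 * a hand-rolled pushdown automaton. State 0x00 scans text and dispatches on
 * '<'; 0x10/0x11 skip <?...?> and <!...> sections; 0x20 reads a tag name;
 * 0x21 reads attribute names; 0x22/0x23 read an attribute value with
 * optional quoting. For an input like the literal below, the root array
 * should end up holding one object with kTAGNAME "item", an "id" attribute
 * key, and kVALUE "text" (note that after each stored attribute the name
 * buffer is re-seeded with "@", so subsequent attribute keys carry an "@"
 * prefix).
 */
void hdata_xml_alloc_example(InvokeTickDeclare)
{
    hdata_t d = hdata_xml_alloc("<item id=\"1\">text</item>", InvokeTickArg);
    /* ... inspect d via the hdata_object_* accessors ... */
    hdata_dealloc(d);
}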
void process_proc(void)
{
    unsigned int num_dsts, n_stks, n_tags, ip, tag, i, j, weight;
    unsigned long flags;
    char *dup;
    routing_table_t *new_routing_table = NULL;
    routing_table_t *old_routing_table = NULL;

    pr_debug("Parsing proc data");
    dup = kstrdup(proc_routes_data, GFP_ATOMIC);

    num_dsts = get_int(&dup);
    if (num_dsts == 0) {  /* was "<= 0", which for an unsigned value can only mean "== 0" */
        return;
    }

    pr_debug("Num routes: [%d]\nCreating new routing_table and flow_table\n", num_dsts);
    new_routing_table = routing_table_create(num_dsts);

    // TODO: 1. Reset flow table so that "flow table miss" happens for all "new" pkts
    //          and updated routing_table is looked up
    //       or 2. do not flush flow_table; wait for idle time out of flows
    // flow_table_delete(flow_table);
    // flow_table = flow_table_create(DEFAULT_FLOW_TABLE_SIZE);

    spin_lock_irqsave(&rt_lock, flags);
    old_routing_table = routing_table;

    // for all destination addresses
    while (num_dsts-- > 0) {
        // create stack_list
        struct stack_list stk_list;

        ip = get_int(&dup);
        pr_debug("IP: %u", ip);
        n_stks = get_int(&dup);
        pr_debug("#stacks: %d", n_stks);
        stack_list_alloc(&stk_list, n_stks);

        i = 0;
        while (i < n_stks) {
            // create stacks
            struct stack stk;

            weight = get_int(&dup);
            n_tags = get_int(&dup);
            pr_debug("weight = %d #tags: %d :\n", weight, n_tags);
            stack_alloc(&stk, n_tags);
            stk.weight = weight;

            j = 0;
            while (j < n_tags) {
                tag = get_int(&dup);
                pr_debug(" %d\n", tag);
                stk.tags[j] = tag;
                j++;
            }
            stk_list.stacks[i] = stk;
            i++;
        }

        // Add entries to new routing_table
        pr_debug("Updating routing table...\n");
        routing_table_set(new_routing_table, ip, stk_list);
    }

    // Update routing table
    rcu_assign_pointer(routing_table, new_routing_table);
    spin_unlock_irqrestore(&rt_lock, flags);

    if (old_routing_table != NULL) {
        pr_debug("Deleting old_routing_table\n");
        routing_table_delete(old_routing_table);
    }
    pr_debug("End of data\n");
}
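/*
 * Hedged format sketch, inferred from the get_int() call order above and
 * not stated anywhere in the source: the proc input is a flat integer
 * sequence,
 *
 *   num_dsts
 *     ip  n_stks
 *       weight  n_tags  tag0 tag1 ...   (repeated n_stks times)
 *     ...                               (repeated num_dsts times)
 *
 * e.g. "1 167772161 1 10 2 100 200" would describe one destination with a
 * single stack of weight 10 carrying tags 100 and 200. The exact textual
 * separators depend on get_int(), which is not shown here.
 */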
/// \brief Perform an SVC call to allocate stack for a thread
/// \param thread_idx The thread index in the PSP table to initialize
void os_KernelStackAlloc (uint32_t thread_idx)
{
    stack_alloc(thread_idx);
    return;
}