int mythread_yield() { int i; node * temporarynode = (node * ) malloc (sizeof(node)); // up the futex of the executig thread and then down the futex of the last thread in the list //that has already set its futex down so that the current thread will block and the next thread in the queue will start executing //futex_up(&block_main_futex); //futex_up(&(queue_info->head->thread->thread_futex)); void * callingaddress; callingaddress=__builtin_return_address(1); haveyielded=haveyielded+1; temporarynode=queue_info->head; queue_info->head=queue_info->head->right; temporarynode->left=queue_info->tail; if(queue_info->head!=NULL) { queue_info->head->left=NULL; } if(queue_info->tail!=NULL) { temporarynode->right=queue_info->tail->right; } queue_info->tail->right=temporarynode; queue_info->tail=temporarynode; futex_up(&(queue_info->head->thread->thread_futex)); futex_up(&yield_futex); futex_down(&(temporarynode->thread->thread_futex)); //futex_down(&(temporarynode->thread->thread_futex)); //futex_down(&(queue_info->tail->left->thread->thread_futex)); haveyielded=haveyielded-1; return 0; }
void *heap_check(void) { futex_down(&malloc_futex); if (first_heap_area == NULL) { futex_up(&malloc_futex); return (void *) -1; } /* Walk all heap areas */ for (heap_area_t *area = first_heap_area; area != NULL; area = area->next) { /* Check heap area consistency */ if ((area->magic != HEAP_AREA_MAGIC) || ((void *) area != area->start) || (area->start >= area->end) || (((uintptr_t) area->start % PAGE_SIZE) != 0) || (((uintptr_t) area->end % PAGE_SIZE) != 0)) { futex_up(&malloc_futex); return (void *) area; } /* Walk all heap blocks */ for (heap_block_head_t *head = (heap_block_head_t *) AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end; head = (heap_block_head_t *) (((void *) head) + head->size)) { /* Check heap block consistency */ if (head->magic != HEAP_BLOCK_HEAD_MAGIC) { futex_up(&malloc_futex); return (void *) head; } heap_block_foot_t *foot = BLOCK_FOOT(head); if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) || (head->size != foot->size)) { futex_up(&malloc_futex); return (void *) foot; } } } futex_up(&malloc_futex); return NULL; }
/** Unlock a fibril mutex.
 *
 * Must only be called while @a fm is locked (checked by the assert).
 * The actual hand-off to a possible waiter is done by
 * _fibril_mutex_unlock_unsafe() while the global async futex serializes
 * access to the mutex state.
 */
void fibril_mutex_unlock(fibril_mutex_t *fm)
{
	assert(fibril_mutex_is_locked(fm));
	futex_down(&async_futex);
	_fibril_mutex_unlock_unsafe(fm);
	futex_up(&async_futex);
}
/*
 * This function unblocks one of the threads blocked on a conditional variable
 * The scheduling policy uses FIFO
 * Pre:A conditional variable on which threads are blocked
 * Post:One thread which was blocked on the conditional variable is unblocked
 * Returns: 1 on failure
 *          0 on success
 */
int mythread_cond_signal(mythread_cond_t * cond)
{
	/*
	 * Wait if there is any thread is executing conditional wait
	 * This is used to ensure no signal is lost
	 * while any thread is executing condtional wait code.
	 */
	if (futex_down(&ccb_table[*cond]->mutex)) {
		perror("failed to do futex_down");
		return 1;
	}
	/*
	 * Dequeue and resume the thread from blocked queue
	 */
	mythread_enter_kernel();
	if (ccb_table[*cond]->q != NULL) {
		mythread_unblock(&ccb_table[*cond]->q, 0);
		/*
		 * NOTE(review): no mythread_leave_kernel() on this path —
		 * presumably mythread_unblock() leaves the kernel itself;
		 * verify against its implementation.
		 */
	}
	else {
		/* Nobody was waiting; just leave the kernel again. */
		mythread_leave_kernel();
	}
	/*
	 * Resume any thread about to execute conditional wait
	 */
	if (futex_up(&ccb_table[*cond]->mutex)) {
		perror("failed to do futex up\n");
		return 1;
	}
	return 0;
}
/** Acquire a rwlock for writing.
 *
 * If the lock is held by a writer or by any readers, the calling fibril
 * is appended to the waiters list (flagged FIBRIL_WRITER) and switched
 * out.  Note that the blocking branch intentionally does NOT call
 * futex_up(&async_futex): the futex is not held after fibril_switch()
 * (cf. the comment in fibril_condvar_wait_timeout()).  Otherwise the
 * fibril takes ownership immediately.
 *
 * Must not be called from a serialized (non-blocking) section.
 */
void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	/* Blocking is forbidden while serialization is in effect. */
	if (fibril_get_sercount() != 0)
		abort();

	futex_down(&async_futex);
	if (frw->writers || frw->readers) {
		/* Contended: enqueue ourselves and wait for a wake-up. */
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = (fid_t) f;
		wdata.wu_event.inlist = true;
		f->flags |= FIBRIL_WRITER;
		list_append(&wdata.wu_event.link, &frw->waiters);
		check_for_deadlock(&frw->oi);
		f->waits_for = &frw->oi;
		/* async_futex is handed over to the manager fibril here. */
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		/* Uncontended: take write ownership right away. */
		frw->oi.owned_by = f;
		frw->writers++;
		futex_up(&async_futex);
	}
}
/*
 * This function is used to block on a conditional variable referenced by
 * cond.  The mutex referenced by mutex should already be locked.
 * Pre:A conditional variable , a locked mutex
 * Post:The calling thread will be blocked. Upon return mutex has been
 *      locked and owned by calling thread
 * Returns: 1 on failure
 *          0 on success
 */
int mythread_cond_wait(mythread_cond_t * cond, mythread_mutex_t * mutex)
{
	/* Phase 1: register on the condition's blocked queue. */
	mythread_enter_kernel();
	mythread_block_phase1(&ccb_table[*cond]->q, 0);
	// Inorder to ensure there will be no signal in between these operations
	// and therefore prevent signal loss futex has been used.
	if (futex_down(&ccb_table[*cond]->mutex)) {
		perror("futex_down failed\n");
		return 1;
	}
	// release lock after phase 1
	if (mythread_mutex_unlock(mutex)) {
		perror("Failed to release lock\n");
		return 1;
	}
	// in order to ensure signal is not received when the thread is about
	// to suspend we need to perform futex up and block phase 2 atomically
	mythread_enter_kernel();
	if (futex_up(&ccb_table[*cond]->mutex)) {
		perror("Failed to futex_up\n");
		return 1;
	}
	/* Phase 2: actually suspend until signalled. */
	mythread_block_phase2();
	// Acquire mutex lock before returning from conditional wait
	if (mythread_mutex_lock(mutex)) {
		perror("Failed to acquire lock");
		return 1;
	}
	return 0;
}
/** Allocate memory
 *
 * Thread-safe front-end for malloc_internal() using the default
 * alignment; the heap futex serializes all heap operations.
 *
 * @param size Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *malloc(const size_t size)
{
	futex_down(&malloc_futex);
	void *result = malloc_internal(size, BASE_ALIGN);
	futex_up(&malloc_futex);

	return result;
}
/* Uses the dispatcher to yield to the next READY thread. */
int mythread_yield()
{
	/* Our own TCB, looked up by thread id. */
	mythread_t *me = queue_search_elementbyId(mythread_queue_globalVar,
	    mythread_self().tid);

	/* Serialize yields: only one thread may yield at a time. */
	futex_down(&yield_futex);
	int dispatched = __dispatch_thread(me);
	futex_up(&yield_futex);

	/* Nobody else was READY; keep running. */
	if (dispatched == -1)
		return 0;

	/* Sleep until another thread wakes us up. */
	futex_down(&me->futex);
	return 0;
}
/** Tell whether a fibril mutex is currently locked.
 *
 * @return true when the mutex counter indicates the lock is taken.
 */
bool fibril_mutex_is_locked(fibril_mutex_t *fm)
{
	futex_down(&async_futex);
	bool result = (fm->counter <= 0);
	futex_up(&async_futex);

	return result;
}
/** Tell whether a rwlock is currently held by at least one reader. */
bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
{
	futex_down(&async_futex);
	bool result = (frw->readers != 0);
	futex_up(&async_futex);

	return result;
}
/** Free a memory block
 *
 * Marks the block free, coalesces it with a free successor and/or
 * predecessor and finally lets heap_shrink() return unused space.  The
 * whole operation is guarded by the heap futex.
 *
 * @param addr The address of the block (NULL is a no-op).
 *
 */
void free(const void *addr)
{
	if (addr == NULL)
		return;

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head = (heap_block_head_t *)
	    (addr - sizeof(heap_block_head_t));

	/* Sanity checks: valid block, not already free, inside its area. */
	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	/* Mark the block itself as free. */
	head->free = true;

	/* Look at the next block. If it is free, merge the two. */
	heap_block_head_t *next_head = (heap_block_head_t *)
	    (((void *) head) + head->size);

	if ((void *) next_head < area->end) {
		block_check(next_head);
		if (next_head->free)
			block_init(head, head->size + next_head->size, true, area);
	}

	/* Look at the previous block. If it is free, merge the two. */
	if ((void *) head > (void *) AREA_FIRST_BLOCK_HEAD(area)) {
		/* The predecessor is located via its foot structure. */
		heap_block_foot_t *prev_foot = (heap_block_foot_t *)
		    (((void *) head) - sizeof(heap_block_foot_t));

		heap_block_head_t *prev_head = (heap_block_head_t *)
		    (((void *) head) - prev_foot->size);

		block_check(prev_head);
		if (prev_head->free)
			block_init(prev_head, prev_head->size + head->size, true, area);
	}

	/* Give back whatever the area no longer needs. */
	heap_shrink(area);

	futex_up(&malloc_futex);
}
/** Tell whether a rwlock is currently held by a writer.
 *
 * At most one writer may hold the lock (asserted).
 */
bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
{
	futex_down(&async_futex);
	bool result = (frw->writers != 0);
	if (result)
		assert(frw->writers == 1);
	futex_up(&async_futex);

	return result;
}
void mythread_exit(void *retval) { int status; node * tempnode = (node *) malloc (sizeof(node)); if(queue_info->head->thread->tid==0) // if the exit is called by main then yield the main thread until the all the other threads are done { while(total_threads>1) { mythread_yield(); } exit(0); // when all the threads are done then exit out of the while loop and end the program } else // if mythread_exit was called by a child thread then just put calling thread out of the queue without modifying the other threads { tempnode = queue_info->head; queue_info->head=queue_info->head->right; futex_up(&(queue_info->head->thread->thread_futex)); futex_up(&yield_futex); total_threads=total_threads-1; futex_down(&block_exitthread_futex); } }
/** Allocate memory with specified alignment
 *
 * @param align Alignment in bytes.
 * @param size  Number of bytes to allocate.
 *
 * @return Allocated memory or NULL.
 *
 */
void *memalign(const size_t align, const size_t size)
{
	if (align == 0)
		return NULL;

	/*
	 * Round the alignment up to the next power of two, at least
	 * sizeof(void *).
	 */
	size_t palign =
	    1 << (fnzb(max(sizeof(void *), align) - 1) + 1);

	futex_down(&malloc_futex);
	void *result = malloc_internal(size, palign);
	futex_up(&malloc_futex);

	return result;
}
/** Try to lock a fibril mutex without blocking.
 *
 * @return true when the lock was acquired, false when it was already
 *         taken.
 */
bool fibril_mutex_trylock(fibril_mutex_t *fm)
{
	bool acquired = false;

	futex_down(&async_futex);
	if (fm->counter > 0) {
		/* Free: take it and record ourselves as the owner. */
		fm->counter--;
		fm->oi.owned_by = (fibril_t *) fibril_get_id();
		acquired = true;
	}
	futex_up(&async_futex);

	return acquired;
}
/* Dispatcher to wake the next Ready thread.
 *
 * Returns 0 when another READY thread was woken, -1 when the scan came
 * back to the calling thread (nothing else to run).
 */
int __dispatch_thread(mythread_t * thread)
{
	struct mythread_queue_node* ptr = queue_search_element(mythread_queue_globalVar, thread->tid);

	/*
	 * Find the next READY thread.
	 * NOTE(review): this assumes the queue is circular and that the
	 * scan always reaches a READY node — at worst the caller's own
	 * node.  If the caller's state is not READY here this loop never
	 * terminates; verify against the state machine.
	 */
	ptr = ptr->nextNode;
	while (ptr->currentThread->state != READY)
		ptr = ptr->nextNode;

	/* No other thread is READY. Dispatcher does nothing. */
	if (ptr->currentThread == thread)
		return -1;
	else {
		/* Wake up the next ready thread. */
		futex_up(&ptr->currentThread->futex);
		return 0;
	}
}
/** Wake fibrils waiting on a condition variable.
 *
 * Removes waiters from the list and readies the inactive ones.  When
 * @a once is true the walk stops after the first fibril that was
 * actually woken; entries that are already active are removed from the
 * list but do not count towards "once".
 */
static void _fibril_condvar_wakeup_common(fibril_condvar_t *fcv, bool once)
{
	link_t *tmp;
	awaiter_t *wdp;

	futex_down(&async_futex);
	while (!list_empty(&fcv->waiters)) {
		tmp = list_first(&fcv->waiters);
		wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
		/* Always unlink, even if the waiter is already active. */
		list_remove(&wdp->wu_event.link);
		wdp->wu_event.inlist = false;
		if (!wdp->active) {
			wdp->active = true;
			fibril_add_ready(wdp->fid);
			optimize_execution_power();
			if (once)
				break;
		}
	}
	futex_up(&async_futex);
}
/** Lock a fibril mutex, blocking the fibril if it is already taken.
 *
 * On contention the fibril is queued on the waiters list and switched
 * out after deadlock detection.  Note that the blocking branch
 * intentionally does NOT call futex_up(&async_futex): the futex is not
 * held after fibril_switch() (cf. fibril_condvar_wait_timeout()).
 *
 * Must not be called from a serialized (non-blocking) section.
 */
void fibril_mutex_lock(fibril_mutex_t *fm)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	/* Blocking is forbidden while serialization is in effect. */
	if (fibril_get_sercount() != 0)
		abort();

	futex_down(&async_futex);
	if (fm->counter-- <= 0) {
		/* Contended: wait for the current owner to hand over. */
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = fibril_get_id();
		wdata.wu_event.inlist = true;
		list_append(&wdata.wu_event.link, &fm->waiters);
		check_for_deadlock(&fm->oi);
		f->waits_for = &fm->oi;
		/* async_futex is handed over to the manager fibril here. */
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		/* Uncontended: we are now the owner. */
		fm->oi.owned_by = f;
		futex_up(&async_futex);
	}
}
/** Wait on a condition variable with an optional timeout.
 *
 * @param fcv     Condition variable.
 * @param fm      Mutex; must be locked by the caller.  It is unlocked
 *                while waiting and re-locked before returning.
 * @param timeout Timeout; 0 waits forever, a negative value fails
 *                immediately with ETIMEOUT.
 *
 * @return ETIMEOUT when the timeout expired (or was negative), EOK
 *         otherwise.
 */
int fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
    suseconds_t timeout)
{
	awaiter_t wdata;

	assert(fibril_mutex_is_locked(fm));

	if (timeout < 0)
		return ETIMEOUT;

	awaiter_initialize(&wdata);
	wdata.fid = fibril_get_id();
	wdata.to_event.inlist = timeout > 0;
	wdata.wu_event.inlist = true;

	futex_down(&async_futex);
	if (timeout) {
		/* Arm the timeout event before going to sleep. */
		getuptime(&wdata.to_event.expires);
		tv_add(&wdata.to_event.expires, timeout);
		async_insert_timeout(&wdata);
	}
	list_append(&wdata.wu_event.link, &fcv->waiters);
	_fibril_mutex_unlock_unsafe(fm);
	fibril_switch(FIBRIL_TO_MANAGER);
	fibril_mutex_lock(fm);

	/* async_futex not held after fibril_switch() */
	futex_down(&async_futex);
	/* Remove ourselves from whichever lists we are still on. */
	if (wdata.to_event.inlist)
		list_remove(&wdata.to_event.link);
	if (wdata.wu_event.inlist)
		list_remove(&wdata.wu_event.link);
	futex_up(&async_futex);

	return wdata.to_event.occurred ? ETIMEOUT : EOK;
}
/*
 * This function unblocks all the threads which are blocked on the
 * conditional variable referenced by cond
 * Pre:A conditional variable on which threads are blocked
 * Post: All threads which are blocked on the conditional variable are unblocked
 * Returns: 1 on failure
 *          0 on success
 */
int mythread_cond_broadcast(mythread_cond_t * cond)
{
	/* Hold the condition futex so no signal/wait can interleave. */
	if (futex_down(&ccb_table[*cond]->mutex)) {
		perror("failed to do futex_down\n");
		return 1;
	}
	/*
	 * Remove each thread from head of the queue and wake up all the
	 * blocked threads.  The kernel is re-entered on every iteration —
	 * presumably mythread_unblock() leaves it each time; verify.
	 */
	mythread_enter_kernel();
	while (ccb_table[*cond]->q != NULL) {
		mythread_unblock(&ccb_table[*cond]->q, 0);
		mythread_enter_kernel();
	}
	mythread_leave_kernel();
	/*
	 * Resume any thread that is about to execute conditional wait
	 */
	if (futex_up(&ccb_table[*cond]->mutex)) {
		perror("failed to do futex_up\n");
		return 1;
	}
	return 0;
}
/** Command-line test driver for the futex library.
 *
 * Usage: test <file> <offset> <operation>
 *
 * Maps the first page of <file>, registers it as a futex region and
 * operates on the futex at <offset> inside the mapping.  Operations:
 * show, init, up, upfair, down, down1sec, mem (blocks on a private
 * in-memory futex — presumably a deliberate self-deadlock test) and
 * async (signal-driven acquisition via SIGIO).
 */
int main(int argc, char *argv[])
{
	struct futex *futex;
	int fd;
	void *map;

	if (argc != 4) {
		fprintf(stderr, "Usage: test <file> <offset> show|init|up|upfair|down|down1sec|mem|async\n");
		exit(1);
	}
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("opening file");
		exit(1);
	}
	map = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	if (futex_region(map, 4096) != 0) {
		perror("futex_region");
		exit(1);
	}
	/* Arithmetic on void * is a GCC extension (byte-granular). */
	futex = map + atoi(argv[2]);
	if (strcmp(argv[3], "init") == 0)
		futex_init(futex);
	else if (strcmp(argv[3], "up") == 0) {
		int woken;
		woken = futex_up(futex);
		if (woken < 0) {
			perror("futex_up");
			exit(1);
		}
		printf("Woke %i\n", woken);
	} else if (strcmp(argv[3], "upfair") == 0) {
		int woken;
		woken = futex_up_fair(futex);
		if (woken < 0) {
			perror("futex_up");
			exit(1);
		}
		printf("Woke %i\n", woken);
	} else if (strcmp(argv[3], "down") == 0) {
		if (futex_down(futex) != 0) {
			perror("futex_down");
			exit(1);
		}
	} else if (strcmp(argv[3], "down1sec") == 0) {
		/* Down with a one-second timeout. */
		struct timespec t;
		t.tv_sec = 1;
		t.tv_nsec = 0;
		if (futex_down_timeout(futex, &t) != 0) {
			perror("futex_down_timeout");
			exit(1);
		}
	} else if (strcmp(argv[3], "mem") == 0) {
		/* Second down blocks: the futex count is exhausted. */
		struct futex myfutex;
		futex_init(&myfutex);
		futex_down(&myfutex);
		futex_down(&myfutex);
	} else if (strcmp(argv[3], "show") == 0) {
		printf("count = %i\n", futex->count);
	} else if (strcmp(argv[3], "async") == 0) {
		static struct sigaction saction;
		int futex_fd;
		/* Set up SIGIO handler */
		saction.sa_sigaction = sigio_action;
		saction.sa_flags = SA_SIGINFO|SA_ONESHOT;
		while (futex_trydown(futex) != 0) {
			/* Register for signal */
			sigaction(SIGIO, &saction, NULL);
			futex_fd = futex_await(futex, SIGIO);
			if (futex_fd < 0) {
				perror("await");
				exit(1);
			}
			/* Re-check: the futex may have been upped already. */
			if (futex_trydown(futex) == 0) {
				close(futex_fd);
				break;
			}
			pause();
			close(futex_fd);
			/* <= in case someone else decremented it */
			if (futex->count <= FUTEX_PASSED) {
				futex->count = -1;
				fprintf(stderr, "Futex was passed to us.\n");
				break;
			}
		}
	} else {
		fprintf(stderr, "Unknown operation %s\n", argv[3]);
		exit(1);
	}
	exit(0);
}
/** Reallocate memory block
 *
 * Three strategies, tried in order while holding the heap futex:
 * shrink in place (splitting off a trailing free block), grow in place
 * by merging with a free successor, or — with the futex released —
 * allocate a new block, copy the payload and free the old block.
 *
 * @param addr Already allocated memory or NULL.
 * @param size New size of the memory block.
 *
 * @return Reallocated memory or NULL.
 *
 */
void *realloc(const void *addr, const size_t size)
{
	if (addr == NULL)
		return malloc(size);

	futex_down(&malloc_futex);

	/* Calculate the position of the header. */
	heap_block_head_t *head = (heap_block_head_t *)
	    (addr - sizeof(heap_block_head_t));

	/* Sanity checks: valid, allocated block inside its area. */
	block_check(head);
	malloc_assert(!head->free);

	heap_area_t *area = head->area;

	area_check(area);
	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
	malloc_assert((void *) head < area->end);

	void *ptr = NULL;
	bool reloc = false;
	/* Gross sizes include block head/foot overhead. */
	size_t real_size = GROSS_SIZE(ALIGN_UP(size, BASE_ALIGN));
	size_t orig_size = head->size;

	if (orig_size > real_size) {
		/* Shrink */
		if (orig_size - real_size >= STRUCT_OVERHEAD) {
			/*
			 * Split the original block to a full block
			 * and a trailing free block.
			 */
			block_init((void *) head, real_size, false, area);
			block_init((void *) head + real_size,
			    orig_size - real_size, true, area);
			heap_shrink(area);
		}
		ptr = ((void *) head) + sizeof(heap_block_head_t);
	} else {
		/*
		 * Look at the next block. If it is free and the size is
		 * sufficient then merge the two. Otherwise just allocate
		 * a new block, copy the original data into it and
		 * free the original block.
		 */
		heap_block_head_t *next_head =
		    (heap_block_head_t *) (((void *) head) + head->size);

		if (((void *) next_head < area->end) &&
		    (head->size + next_head->size >= real_size) &&
		    (next_head->free)) {
			block_check(next_head);
			block_init(head, head->size + next_head->size, false, area);
			split_mark(head, real_size);

			ptr = ((void *) head) + sizeof(heap_block_head_t);
			/* The next-fit cache may now be stale. */
			next_fit = NULL;
		} else
			reloc = true;
	}

	futex_up(&malloc_futex);

	if (reloc) {
		/* Relocate outside the futex: malloc + copy + free. */
		ptr = malloc(size);
		if (ptr != NULL) {
			memcpy(ptr, addr, NET_SIZE(orig_size));
			free(addr);
		}
	}

	return ptr;
}
int mythread_wrapper(void * arg) { void * fromthreadfunction ; node * thread_block; int temp; node * temptail,*temphead; int *i = (int *) malloc (sizeof(int)); char buffer[10]="wrapper"; void ** status; struct wrapper_arguments * arguments = (struct wrapper_arguments *) arg; void * (*functionpointer) (void *) = *(arguments->functionname); int jointargetcheck=0; /* creating a node and insertig the TCB into the queue */ total_threads+=1; temptail=(node *)malloc(sizeof(node)); temphead=(node *)malloc(sizeof(node)); thread_block=createNode(); thread_block->thread=arguments->tcb; temptail=queue_info->tail; addNode(thread_block); /* HAVE TO ADD getttid here to assign the threadid to the current block */ if(isfirsttime==0) { isfirsttime+=1; futex_up(&f); } else { futex_up(&block_main_futex); } //if(haveyielded>0) { //futex_down(&(temptail->thread->thread_futex)); } futex_down(&(queue_info->tail->thread->thread_futex));//locking its own futex rather than that of the previous ones futex_down(&yield_futex); // this is for the join condition check if the current threads target is in the execution queue if yes the yield the current thread if(queue_info->head->thread->join_target==NULL) { jointargetcheck=0; } else// check if the target is present in the queue if yes yield { jointargetcheck=findthread(queue_info->head->thread->join_target); } if(jointargetcheck==1)//added the futex down statements today latest { jointargetcheck=0; futex_down(&(queue_info->head->thread->thread_futex));//locking its own futex rather than that of the previous ones futex_down(&yield_futex); temp=mythread_join(*(queue_info->head->thread->join_target),status); } //jointargetcheck=mythread_join(queue_info->head->thread->join_target,status); (*(functionpointer))(((void *)arguments->functionarguments)); // when this thread is done move the head to the next position and free the previous nodes futex temphead=queue_info->head; queue_info->head=queue_info->head->right; if(queue_info->head!=NULL) 
queue_info->head->left=NULL; futex_up(&(queue_info->head->thread->thread_futex));//changing this futex_up(&yield_futex); //futex_down(&(queue_info->tail->left->thread->thread_futex)); total_threads=total_threads-1; }
/* For printing to console.
 *
 * Writes the whole NUL-terminated buffer to stdout while holding
 * printFutex so output from different threads does not interleave.
 * Unlike the previous version, the return value of write() is checked
 * and partial writes are retried until the full message is out.
 */
void printToConsole(char *buffer)
{
	size_t remaining = strlen(buffer);
	const char *pos = buffer;

	futex_down(&printFutex);
	/* write() may transfer fewer bytes than requested. */
	while (remaining > 0) {
		ssize_t written = write(1, pos, remaining);
		if (written <= 0)
			break;	/* give up on error */
		pos += written;
		remaining -= (size_t) written;
	}
	futex_up(&printFutex);
}
/** Common unlock path for fibril rwlocks (reader and writer).
 *
 * Drops one reader reference or the writer, and when the lock becomes
 * fully free, wakes waiters: consecutive readers are all woken (the
 * first becomes the nominal owner), while a waiting writer is woken
 * alone and stops the walk.
 */
static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
{
	futex_down(&async_futex);
	if (frw->readers) {
		/* Reader unlock: drop one reference. */
		if (--frw->readers) {
			if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
				/*
				 * If this reader firbril was considered the
				 * owner of this rwlock, clear the ownership
				 * information even if there are still more
				 * readers.
				 *
				 * This is the limitation of the detection
				 * mechanism rooted in the fact that tracking
				 * all readers would require dynamically
				 * allocated memory for keeping linkage info.
				 */
				frw->oi.owned_by = NULL;
			}
			/* Other readers remain; nobody can be woken yet. */
			goto out;
		}
	} else {
		/* Writer unlock. */
		frw->writers--;
	}

	/* The lock is now completely free. */
	assert(!frw->readers && !frw->writers);

	frw->oi.owned_by = NULL;

	/* Wake waiters in FIFO order. */
	while (!list_empty(&frw->waiters)) {
		link_t *tmp = list_first(&frw->waiters);
		awaiter_t *wdp;
		fibril_t *f;

		wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
		f = (fibril_t *) wdp->fid;

		f->waits_for = NULL;

		if (f->flags & FIBRIL_WRITER) {
			/* A writer can only run with no readers active. */
			if (frw->readers)
				break;
			wdp->active = true;
			wdp->wu_event.inlist = false;
			list_remove(&wdp->wu_event.link);
			fibril_add_ready(wdp->fid);
			frw->writers++;
			frw->oi.owned_by = f;
			optimize_execution_power();
			/* Exactly one writer; stop waking. */
			break;
		} else {
			wdp->active = true;
			wdp->wu_event.inlist = false;
			list_remove(&wdp->wu_event.link);
			fibril_add_ready(wdp->fid);
			if (frw->readers++ == 0) {
				/* Consider the first reader the owner. */
				frw->oi.owned_by = f;
			}
			optimize_execution_power();
		}
	}
out:
	futex_up(&async_futex);
}