void STLMetaClass::destruct( const ObjectWrapper &obj,
                             bool called_from_destructor ) const {
  kernel_assert(obj.get_meta_class() == this);
  // an STL object is never destroyed from the destructor
  kernel_assert( !called_from_destructor );
  _stl_descriptor->get_destructor_function()( obj );
}
void kernel_interrupts_init(int enable_timer) {
    KERNEL_TRACE("init", "enabling interrupts");
    kernel_assert(cp0_status_ie_get() == 0);
    cp0_status_ie_enable();
    if(enable_timer) {
        /* Start timer */
        kernel_timer_init();
        cp0_status_im_enable(MIPS_CP0_STATUS_IM_TIMER);
    }
}
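/*
 * cp0_status_ie_get()/cp0_status_ie_enable() are not shown in this file.
 * A minimal sketch of what they might look like, assuming the usual MIPS
 * CP0 Status layout (register $12, IE = bit 0, IM = bits 8..15); the real
 * CherIOS helpers may differ.
 */
#include <stdint.h>

static inline uint32_t cp0_status_get(void) {
    uint32_t status;
    __asm__ volatile("mfc0 %0, $12" : "=r"(status));
    return status;
}

static inline uint32_t cp0_status_ie_get(void) {
    return cp0_status_get() & 0x1;  /* IE is Status bit 0 */
}

static inline void cp0_status_ie_enable(void) {
    uint32_t status = cp0_status_get() | 0x1;
    __asm__ volatile("mtc0 %0, $12" :: "r"(status));
}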
void UnionMetaClass::adjust_field_offsets() {
  /*
  AggregateMetaClass::adjust_field_offsets();
  int currentOffset = _base_class ? _base_class->get_size_of_instance() : 0;
  */
#if 0
  // I see absolutely no way this code could be correct
  if ( _tag_offset == -1 ) {
    // change the UnionMetaClass from being semantically controlled to
    // tag controlled: insert an additional offset field
    MetaClass* intMC = synchronizer->get_object_factory()->lookupMetaClass("int");
    kernel_assert( intMC );
    int size = intMC->get_size_of_instance();
    currentOffset = currentOffset + size - ( currentOffset % size ); // align
    _tag_offset = currentOffset;
    add_field_description( "_offset", intMC, currentOffset );
    currentOffset += intMC->get_size_of_instance();
  }
#endif
  // CAUTION - not corrected because we do not have any of these anymore
  kernel_assert_message( 0, ("This code should no longer be used") );
#if 0
  // synchronize the offsets
  int startOffset = currentOffset;
  int endOffset = startOffset;
  suif_vector<FieldDescription*>::iterator it = _union_fields->begin(),
                                           end = _union_fields->end();
  FieldDescription* fieldDescription;
  for ( ; it != end; it++ ) {
    fieldDescription = *it;
    MetaClass* currentType = fieldDescription->metaClass;
    // adjust offset and alignment
    int memberSize = currentType->get_size_of_instance();
    int offset = fieldDescription->offset;
    if ( startOffset > offset ) offset = startOffset;
    offset = offset + memberSize - ( offset % memberSize );
    changes |= fieldDescription->offset != offset;  // was currentOffset, which is out of scope here
    fieldDescription->offset = offset;
    fieldDescription->metaClass = currentType;
    int currentEndOffset = offset + memberSize;
    if ( currentEndOffset > endOffset ) endOffset = currentEndOffset;
  }
  changes |= ( get_size() != endOffset );
  set_size( endOffset );
  // don't call MetaClass::synchronize because it has already been called
  // indirectly through AggregateMetaClass::synchronize - DLM
  // MetaClass::synchronize( synchronizer );
#endif
}
virtual bool previous( VirtualIterator* state ) {
#ifdef AG
  IteratorState& it_state = state->top();
  it_state.integer--;
  bool is_valid = ( _members->size() >= it_state.integer );
  if ( !is_valid ) {
    state->pop();
    kernel_assert(0);
    // is_valid = state->top().node->previous( state );
  }
  return is_valid;
#endif
  return false;
}
void * kernel_malloc(size_t nbytes) {
    union overhead *op;
    int bucket;
    size_t amt;

    /*
     * First time malloc is called, set up the page size and
     * align the break pointer so all data will be page aligned.
     */
    if (pagesz == 0) {
        pagesz = CHERIOS_PAGESIZE;
        init_pagebucket();
        __init_heap(pagesz);
    }
    kernel_assert(pagesz != 0);

    /*
     * Convert the amount of memory requested into the closest block size
     * stored in the hash buckets which satisfies the request.
     * Account for the space used per block for accounting.
     */
    if (nbytes <= pagesz - sizeof(*op)) {
        amt = 32;    /* size of first bucket */
        bucket = 2;
    } else {
        amt = pagesz;
        bucket = pagebucket;
    }
    while (nbytes > (size_t)amt - sizeof(*op)) {
        amt <<= 1;
        if (amt == 0)
            return (NULL);
        bucket++;
    }

    /*
     * If nothing is in the hash bucket right now,
     * request more memory from the system.
     */
    if ((op = nextf[bucket]) == NULL) {
        morecore(bucket);
        if ((op = nextf[bucket]) == NULL)
            return (NULL);
    }

    /* remove from linked list */
    nextf[bucket] = op->ov_next;
    op->ov_magic = MAGIC;
    op->ov_index = bucket;
    return (cheri_setbounds(op + 1, nbytes));
}
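/*
 * A worked example of the sizing loop above, as a hypothetical standalone
 * helper. It assumes an 8-byte per-block header and the 4.3BSD convention
 * that bucket i holds blocks of 1 << (i + 3) bytes (so pagebucket == 9 for
 * a 4 KiB page); the real kernel's header size and bucket layout may differ.
 */
static int pick_bucket(size_t nbytes, size_t pagesz, int pagebucket) {
    const size_t header = 8;    /* assumed sizeof(union overhead) */
    size_t amt = 32;            /* size of first bucket */
    int bucket = 2;

    if (nbytes > pagesz - header) {
        amt = pagesz;
        bucket = pagebucket;
    }
    while (nbytes > amt - header) {
        amt <<= 1;
        if (amt == 0)
            return -1;          /* size overflowed: request too large */
        bucket++;
    }
    /* e.g. pick_bucket(100, 4096, 9) == 4: 100 bytes + header fits in 128 */
    return bucket;
}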
int dskread(u8 *buf, u_int64_t lba, int nblk) {
    size_t size  = nblk * DEV_BSIZE;
    size_t start = lba * DEV_BSIZE;
    //KERNEL_TRACE(__func__, "buf:%p lba:%03ld nblk:%ld start:%06lx size:%lx",
    //             buf, lba, nblk, start, size);
    u8 * fsp = &__fs_start;
    for(size_t i = 0; i < size; i++) {
        // Check read is not out of bounds
        kernel_assert(fsp + start + i < &__fs_end);
        buf[i] = fsp[start + i];
    }
    //KERNEL_TRACE(__func__, "done");
    return 0;
}
FieldDescription* UnionMetaClass::get_proper_field_description(
    const Address address, size_t i ) const {
  // This parameter used to be an int; assert here to make sure no one
  // really tries to ask for the -1 field.
  kernel_assert( i < (unsigned)-10 );
  // if (i < 0)
  //   return NULL;
  int index = get_tag_num( address );
  if ( index > 0 ) {
    if ( i == 0 ) {
      FieldDescription* field_description = (*_union_fields)[ index ];
      return field_description;
    }
    i--;
  }
  return NULL;
}
void UnionMetaClass::initialize( const ObjectWrapper &obj,
                                 InputStream* inputStream ) const {
  Address address = obj.get_address();
  kernel_assert(obj.get_meta_class() == this);
  // debugit("initializing union ", address);
  AggregateWrapper agg_obj(address, this);
  AggregateMetaClass::initialize( obj, inputStream );
  int index = get_tag_num( address );
  if (index < 0) {
    zero_field(address);
  } else {
    FieldDescription* field_description = (*_union_fields)[ index ];
    ObjectWrapper field = field_description->build_object(agg_obj);
    // Address instance_address =
    //     (Address)( ( (Byte*)address ) + field_description->offset );
    field.initialize(inputStream);
    // field_description->get_meta_class()->initialize( field.get_address(),
    //                                                  inputStream );
  }
}
void UnionMetaClass::write( const ObjectWrapper &obj,
                            OutputStream* outputStream ) const {
  Address instance = obj.get_address();
  kernel_assert(obj.get_meta_class() == this);
  AggregateWrapper agg_obj(instance, this);
  AggregateMetaClass::write( obj, outputStream );
  int index = get_tag_num( instance );
  // if the tag number is not stored in a field that was already
  // written out => write out the tag number
  if ( _tag_offset == -1 ) {
    outputStream->write_unsigned_int( index );
  }
  if ( index >= 0 ) {
    FieldDescription* field_description = (*_union_fields)[ index ];
    ObjectWrapper field = field_description->build_object(agg_obj);
    outputStream->write( field, false );
  }
}
void UnionMetaClass::read( const ObjectWrapper &obj,
                           InputStream* inputStream ) const {
  Address instance = obj.get_address();
  kernel_assert(obj.get_meta_class() == this);
  AggregateWrapper agg_obj(instance, this);
  AggregateMetaClass::read( obj, inputStream );
  int index;
  if ( _tag_offset != -1 ) {
    index = *(int*)( (Byte*)instance + _tag_offset );
  } else {
    index = inputStream->read_unsigned_int();
  }
  if ( index >= 0 ) {
    FieldDescription* field_description = (*_union_fields)[ index ];
    ObjectWrapper field = field_description->build_object(agg_obj);
    inputStream->read( field, false );
  } else {
    // works for now, as the only Union is a Union of pointers
    zero_field(instance);
  }
}
UnionMetaClass::UnionMetaClass(const UnionMetaClass &) :
  _union_fields(0),
  _tag_offset(0),
  _union_selector(0) {
  kernel_assert(false);
}
Address VirtualNode::current( const VirtualIterator* state ) const {
  kernel_assert( false );
  return 0;
}
bool VirtualNode::next( VirtualIterator* state ) {
  kernel_assert( false );
  return false;
}
Object::Object(const Object&) :
  _meta_class(0) {
  kernel_assert(false);
}
virtual const MetaClass* current_meta_class( const VirtualIterator* state ) const {
  int index = state->top().get_iteration_num();
  kernel_assert( !(*_members)[ index ]->_continuation );
  return (*_members)[ index ]->_meta_class;
}
void increment() {
  kernel_assert(!is_iterator());
  _iteration_num++;
}
int main( int argc, char* argv[] ) {
    //test_system_idle_instrumented();
    //bwprintf(COM2, "Starting Kernel!");
    //bwsetfifo(COM2, OFF);

    //create the globals structure
    struct kern_globals GLOBAL;
    struct request* req;

    //2385260
    //0x2E56C
    /*
    bwprintf(COM2, "TID[%d]\tPC[%d]\tPRIOR[%d]\n\n",
        GLOBAL.ACTIVE_TASK->task_id, GLOBAL.ACTIVE_TASK->pc,
        GLOBAL.ACTIVE_TASK->priority);
    bwprintf(COM2, "REQ_NUM[%d]\n", GLOBAL.ACTIVE_TASK->req->req_num);
    bwprintf(COM2, "SND_TID[%d]\n",
        ((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid);
    bwprintf(COM2, "\tSND_STATE[%d]\n",
        GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].td_state);
    bwprintf(COM2, "\tSND_PRIOR[%d]\n",
        GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].priority);
    bwprintf(COM2, "\tSND_PC[%x]\n",
        GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].pc);
    bwprintf(COM2, "\tSND_REQ_NUM[%d]\n",
        GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].req->req_num);
    bwprintf(COM2, "\t\tSND_REQ_MSG[%x]\n",
        ((struct request_receive*)GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].req)->msg);
    bwprintf(COM2, "\t\tSND_REQ_MSG_LEN[%d]\n",
        ((struct request_receive*)GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].req)->msglen);
    bwprintf(COM2, "\t\tSND_REQ_NUM[%d]\n",
        ((struct request_receive*)GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].req)->req_num);
    bwprintf(COM2, "\t\tSND_REQ_TID_POINTER[%x]\n",
        ((struct request_receive*)GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].req)->tid);
    bwprintf(COM2, "ROUTE_SRV_TID[45]\n",
        GLOBAL.USER_TDS[((struct request_send*)GLOBAL.ACTIVE_TASK->req)->Tid].req->req_num);
    bwprintf(COM2, "\tROUTE_SRV_STACK_BOTTOM[%x]\n", GLOBAL.USER_TDS[45].stack_space_bottom);
    bwprintf(COM2, "\tROUTE_SRV_STACK_TOP[%x]\n", GLOBAL.USER_TDS[45].stack_space_top);
    bwprintf(COM2, "\tROUTE_SRV_STACK_POINTER[%x]\n", GLOBAL.USER_TDS[45].sp);
    //bwprintf(COM2, "\tROUTE_SRV_ROUTE_STRUCT_SIZE[%x]\n");
    bwprintf(COM2, "MSG_POINTER[%x]\n",
        ((struct request_send*)GLOBAL.ACTIVE_TASK->req)->msg);
    bwprintf(COM2, "\tMSG[0] = [%d]\n",
        ((int*)((struct request_send*)GLOBAL.ACTIVE_TASK->req)->msg)[0]);
    bwprintf(COM2, "\tMSG[1] = [%d]\n",
        ((int*)((struct request_send*)GLOBAL.ACTIVE_TASK->req)->msg)[1]);
    bwprintf(COM2, "\tMSG_LEN[%d]\n",
        ((struct request_send*)GLOBAL.ACTIVE_TASK->req)->msglen);
    bwprintf(COM2, "RPL_POINTER[%d]\n",
        ((struct request_send*)GLOBAL.ACTIVE_TASK->req)->reply);
    bwprintf(COM2, "\tRPL_LEN[%d]\n",
        ((struct request_send*)GLOBAL.ACTIVE_TASK->req)->replylen);
    bwgetc(COM2);
    */

    //loop through all of the tds and find the task with the greatest
    //number of tasks blocked on it, along with its task id
    /*
    int tdindex = 0;
    int block_max = 0;
    int i = 0;
    for(i = 0; i < TID_INDEX_MASK + 1; i++){
        int sqs = GLOBAL.USER_TDS[i].send_Q_size;
        if(sqs > block_max){
            block_max = sqs;
            tdindex = i;
        }
    }
    */
    //bwprintf(COM2, "\n\nBLK[%d] TD_IND[%d] TID[%d] PRIOR[%d]\n\n", block_max,
    //    tdindex, GLOBAL.USER_TDS[tdindex].task_id, GLOBAL.USER_TDS[tdindex].priority);
    //bwgetc(COM2);

    initialize_globals(&GLOBAL);
    initialize_first_task(&GLOBAL);
    initialize_hardware();

    while(1){
        req = NULL;
        //no runnable tasks left: shut down
        if(GLOBAL.SCHEDULER.highest_priority == -1){
            break;
        }
        GLOBAL.ACTIVE_TASK = schedule(&GLOBAL);
        req = kerexit(GLOBAL.ACTIVE_TASK);
        kernel_assert(req != NULL, "req != NULL");
        //check for quit
        if(req->req_num == SYSCALL_QUIT){
            break;
        }
        handle_request(&GLOBAL, req);
    }

    bwprintf(COM2, "GoodBye World\n\n");
    cleanup_hardware();
    return 0;
}
Iterator *get_iterator() const {
  kernel_assert(is_iterator());
  return _iterator;
}
AggregateVirtualNode &operator=(const AggregateVirtualNode &other) {
  kernel_assert(0);
  return(*this);
}
AggregateVirtualNode(const AggregateVirtualNode &other) :
  _members(0) {
  kernel_assert(0);
}
UnionMetaClass &UnionMetaClass::operator=(const UnionMetaClass &) {
  kernel_assert(false);
  return(*this);
}
const String VirtualNode::current_name( const VirtualIterator* state ) const {
  kernel_assert( false );
  return emptyString;
}
size_t get_iteration_num() const {
  kernel_assert(!is_iterator());
  return _iteration_num;
}
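// Taken together, increment(), get_iterator(), and get_iteration_num()
// suggest the per-level iterator state is a small discriminated value:
// either a plain iteration counter or a nested Iterator*, selected by
// is_iterator(). The sketch below is a hypothetical reconstruction; the
// member names and layout are assumptions, not the actual SUIF declaration.
class IteratorStateSketch {
public:
  explicit IteratorStateSketch(size_t num) :
    _is_iterator(false), _iteration_num(num), _iterator(0) {}
  explicit IteratorStateSketch(Iterator *it) :
    _is_iterator(true), _iteration_num(0), _iterator(it) {}

  bool is_iterator() const { return _is_iterator; }

  // Each accessor asserts it is used on the matching variant only.
  void increment() { kernel_assert(!is_iterator()); _iteration_num++; }
  size_t get_iteration_num() const { kernel_assert(!is_iterator()); return _iteration_num; }
  Iterator *get_iterator() const { kernel_assert(is_iterator()); return _iterator; }

private:
  bool _is_iterator;
  size_t _iteration_num;
  Iterator *_iterator;
};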
virtual const String current_name( const VirtualIterator* state ) const {
  int index = state->top().get_iteration_num();
  kernel_assert( !(*_members)[ index ]->_continuation );
  return (*_members)[ index ]->_name;
}
Object& Object::operator=(const Object&) {
  kernel_assert(false);
  return(*this);
}
static inline Elf64_Shdr *elf_section(Elf64_Ehdr *hdr, int idx) {
    kernel_assert(idx < hdr->e_shnum);
    return &elf_sheader(hdr)[idx];
}
act_t * act_register(reg_frame_t *frame, queue_t *queue, const char *name,
                     status_e create_in_status, act_control_t *parent, size_t base) {
    (void)parent;
    KERNEL_TRACE("act", "Registering activation %s", name);

    if(kernel_next_act >= MAX_ACTIVATIONS) {
        kernel_panic("no act slot");
    }

    act_t * act = kernel_acts + kernel_next_act;
    act->image_base = base;

    //TODO bit of a hack: the kernel needs to know which namespace service to use
    if(kernel_next_act == namespace_num_namespace) {
        KERNEL_TRACE("act", "found namespace");
        ns_ref = act_create_sealed_ref(act);
    }

#ifndef __LITE__
    /* set name */
    kernel_assert(ACT_NAME_MAX_LEN > 0);
    int name_len = 0;
    if(VCAP(name, 1, VCAP_R)) {
        name_len = imin(cheri_getlen(name), ACT_NAME_MAX_LEN-1);
    }
    for(int i = 0; i < name_len; i++) {
        char c = name[i];
        act->name[i] = c; /* todo: sanitize the name if we do not trust it */
    }
    act->name[name_len] = '\0';
#endif

    /* set status */
    act->status = create_in_status;

    /* Some documentation for the interface between the kernel and activation start.
     *
     * These fields are set up by the caller of act_register:
     *
     *   a0  : user GP argument (goes to main)
     *   c3  : user Cap argument (goes to main)
     *
     * These fields are set up by act_register itself (although the queue is
     * an argument to the function):
     *
     *   c21 : self control reference
     *   c23 : namespace reference (may be null for init and namespace)
     *   c24 : kernel interface table
     *   c25 : queue
     */

    /* set namespace */
    frame->cf_c21 = (capability)act_create_sealed_ctrl_ref(act);
    frame->cf_c23 = (capability)ns_ref;
    frame->cf_c24 = (capability)get_if();
    frame->cf_c25 = (capability)queue;

    /* set queue */
    msg_queue_init(act, queue);

    /* set expected sequence to not expecting */
    act->sync_state.sync_token = 0;
    act->sync_state.sync_condition = 0;

    /* set scheduling status */
    sched_create(act);

    /* update next_act */
    kernel_next_act++;

    KERNEL_TRACE("register", "image base of %s is %lx", act->name, act->image_base);
    KERNEL_TRACE("act", "%s OK!", __func__);
    return act;
}
const MetaClass* VirtualNode::current_meta_class( const VirtualIterator* state ) const {
  kernel_assert( false );
  return 0;
}
static inline Elf64_Phdr *elf_segment(Elf64_Ehdr *hdr, int idx) {
    kernel_assert(idx < hdr->e_phnum);
    return &elf_pheader(hdr)[idx];
}
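/*
 * elf_sheader() and elf_pheader() are used above but not shown. Plausible
 * definitions, assuming the standard ELF64 layout where e_shoff/e_phoff are
 * byte offsets from the start of the file image; the actual helpers in this
 * codebase may differ.
 */
static inline Elf64_Shdr *elf_sheader(Elf64_Ehdr *hdr) {
    return (Elf64_Shdr *)((char *)hdr + hdr->e_shoff);
}

static inline Elf64_Phdr *elf_pheader(Elf64_Ehdr *hdr) {
    return (Elf64_Phdr *)((char *)hdr + hdr->e_phoff);
}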
void STLMetaClass::set_constructor_function( ConstructorFunction constructorFunction ) {
  kernel_assert( false );
}