char *XMPP_GenLocal()
{
  char *buf;

  if(!xmpp_cl_user || !xmpp_cl_host || !xmpp_cl_rsrc)
  {
    buf=kralloc(64);
    kprints(buf, "null@null/null");
    return(buf);
  }

  buf=kralloc(64);
  kprints(buf, "%s@%s/%s", xmpp_cl_user, xmpp_cl_host, xmpp_cl_rsrc);
  return(buf);
}
extern void interrupt_dispatcher(const unsigned long interrupt_number)
{
  /* Select a handler based on the interrupt source. */
  switch(interrupt_number)
  {
    case 32:
    {
      timer_interrupt_handler();
      break;
    }

    case 33:
    {
      keyboard_interrupt_handler();
      break;
    }

    case 39:
    {
      /* Spurious interrupt occurred. This can happen if we spend too long
         with interrupts disabled. */
      break;
    }

    default:
    {
      kprints("Unknown interrupt. Vector:");
      kprinthex(interrupt_number);
      kprints("\n");

      while(1)
      {
        outw(0x8a00, 0x8a00);
        outw(0x8a00, 0x8ae0);
      }
    }
  }

  /* Acknowledge the interrupt so that new interrupts can be sent to the
     CPU. Interrupts from the slave PIC (vectors 40-47) need an EOI to both
     controllers. */
  if (interrupt_number < 48)
  {
    if (interrupt_number >= 40)
    {
      outb(0xa0, 0x20);
    }
    outb(0x20, 0x20);
  }
}
void push_back_process_queue(struct process_entry process)
{
  /* Allocate space for the new element. */
  struct process_queue_element *new_process =
    (struct process_queue_element *) kalloc(sizeof(struct process_queue_element));

  if(new_process == (struct process_queue_element *) ERROR)
  {
    while(1)
      kprints("push_back_process_queue couldn't claim memory for a new process!");
  }

  new_process->element = process;

  /* If the queue is empty, both the top and the back pointers must be
     initialized. */
  if(is_empty_process_queue())
  {
    back_process = new_process;
    top_process = new_process;
  }
  else
  {
    back_process->next = new_process;
    back_process = new_process;
  }
}
/*--
Cat pdnet;Protocols;XMPP/Jabber;JEP-0030
Form
  dyxNode *XMPP_JEP30_QueryItems(char *to, char *node);
Description
  Queries items from a given jid and node.
--*/
dyxNode *XMPP_JEP30_QueryItems(char *to, char *node)
{
  char buf[16];
  dyxNode *n;
  char *tag;

  n=dyxNewNode();
  n->key=kstrdup("query");
  dyxAddAttr(n, "xmlns", "http://jabber.org/protocol/disco#items");
  if(node)dyxAddAttr(n, "node", node);

  kprints(buf, "jep30-%d", jep30_seq++);
  tag=buf;

  XMPP_SendIQ(to, "get", tag, n);

  /* Poll the network until the result stanza tagged with our id arrives. */
  while(1)
  {
    n=XMPP_CheckResults(tag);
    if(n)break;
    NET_Poll();
  }

  return(n->first->first);
}
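/* A minimal usage sketch, assuming XMPP_JEP30_QueryItems blocks (polling the
   network) until the disco#items result arrives and returns the first child
   of the <query> element, i.e. the first <item> node. Walking the remaining
   items via the ->next link is an assumption about the dyxNode layout, and
   this example function is not part of the library. */
void XMPP_JEP30_CountItems_Example(char *to)
{
  dyxNode *cur;
  int count;

  count=0;
  for(cur=XMPP_JEP30_QueryItems(to, NULL); cur; cur=cur->next)
    count++;

  kprint("disco#items: %d item(s)\n", count);
}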
struct process_entry pop_process_queue()
{
  struct process_entry temp;
  struct process_queue_element *deleted;

  temp.context=(struct AMD64Context*) 0;

  /* The queue should not be empty. */
  if(is_empty_process_queue())
  {
    while(1)
    {
      kprints("kernel panic: process queue corrupted!");
    }
  }

  deleted=top_process;
  temp=top_process->element;

  /* If there is a single element, both the top and the back pointers must
     be reset to zero. */
  if(top_process==back_process)
  {
    top_process =(struct process_queue_element *)0;
    back_process=(struct process_queue_element *)0;
  }
  else
  {
    top_process = top_process->next;
  }

  /* Reclaim the memory used by the removed element. */
  kfree((uint64_t)deleted);

  return temp;
}
char *jabberrpc_encode_url(NET_Reference *ref)
{
  XMPP_RefInfo *ri;
  char *s;
  char buf[1024];
  int bgbrpc;

  bgbrpc=!strcmp(ref->iface->name, "bgbrpc-jid");
  ri=ref->refinfo;

  s=buf;
  if(!bgbrpc)
    s=kprints(s, "jabber-rpc:%s@%s/%s", ri->node, ri->host, ri->rsrc);
  else
    s=kprints(s, "bgbrpc-jid:%s@%s/%s", ri->node, ri->host, ri->rsrc);
  if(ri->obj)s=kprints(s, "#%s", ri->obj);

  s=kstrdup(buf);
  return(s);
}
struct process_entry top_process_queue()
{
  /* The queue should not be empty. */
  if(is_empty_process_queue())
  {
    while(1)
    {
      kprints("kernel panic: process queue corrupted!");
    }
  }

  return top_process->element;
}
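/* A minimal round-trip sketch of the queue above (assumptions: struct
   process_entry carries at least a context pointer, the globals top_process
   and back_process start out empty, and process_queue_example itself is a
   hypothetical caller, not part of the kernel). */
void process_queue_example(struct process_entry first, struct process_entry second)
{
  struct process_entry out;

  push_back_process_queue(first);    /* queue: first                    */
  push_back_process_queue(second);   /* queue: first, second            */

  out = top_process_queue();         /* peek at first; queue unchanged  */
  out = pop_process_queue();         /* removes first (FIFO order)      */
  out = pop_process_queue();         /* removes second; queue now empty */
  (void) out;
}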
char *jabberrpc_encode_jid(XMPP_RefInfo *ri)
{
  char *s;
  char *buf;

  buf=kralloc(256);
  s=buf;
  s=kprints(s, "%s@%s/%s", ri->node, ri->host, ri->rsrc);

  s=krstrdup(buf);
  return(s);
}
void kprinthex(const register long value)
{
  const static char hex_helper[16]="0123456789abcdef";
  register int i;

  /* Pre-filled print buffer with an explicitly defined termination
     character. */
  /* TODO: Try to find a better solution. This approach does, however,
     eliminate a lot of cursor position calculations. */
  char tmp_string[17]="                \0";

  /* Print each character of the hexadecimal number. This is a very
     inefficient way of printing hexadecimal numbers. It is, however, very
     compact in terms of the number of source code lines. */
  for (i = 15; i >= 0; i--)
  {
    tmp_string[15 - i] = hex_helper[(value >> (i * 4))&15];
  }

  kprints(tmp_string);
}
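/* A small usage sketch: kprinthex always emits 16 zero-padded hexadecimal
   digits with no trailing newline, so the call below prints
   "00000000000face1" followed by a line break. The wrapper function name is
   illustrative only. */
void kprinthex_example(void)
{
  kprinthex(0xface1);
  kprints("\n");
}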
int XmlRpc_HandleMessage(HTTP_Con *con, char *msg)
{
  elem t;
  char *s, *buf;

  kprint("handle message\n");

  s=msg;
  t=ParseXML_ParseExpr(&s);

  /* Skip any leading processing instructions ("<?xml ...?>"). */
  while(1)
  {
    if(!ELEM_CONSP(t))break;
    if(CAR(t)!=SYM("?"))break;
    t=ParseXML_ParseExpr(&s);
    // TyFcn_DumpElem(t);
  }

  kprint("handle call\n");
  t=XmlRpc_HandleCall(t);
  kprint("encode response\n");
  t=XmlRpc_EncodeResponse(t);

  kprint("send response\n");
  buf=kalloc(16384);
  s=buf;
  s=kprints(s, "<?xml version=\"1.0\"?>\n");
  s=ParseXML_PrintExpr(s, t);
  HttpNode_SendResponse(con, buf, s-buf);
  kfree(buf);

  return(0);
}
void *jabberrpc_rpc_call(NET_Reference *ref, void **args)
{
  XMPP_RefInfo *ri;
  NetParse_Node *exp, *cur, *n, *n2, *lst;
  char *s, *t;
  int i;
  void *p;
  int bgbrpc;
  char *tag;

  kprint("Jabber-RPC: call\n");

  bgbrpc=!strcmp(ref->iface->name, "bgbrpc-jid");
  ri=ref->refinfo;

  /* Encode each argument as <param><value>...</value></param>. */
  lst=NULL;
  for(i=0; args[i]; i++)
  {
    if(!bgbrpc)
      n=XmlRpc_EncodeValue(args[i]);
    else
      n=BGBRPC_EncodeValue(args[i]);

    n2=n;
    n=NetParse_NewNode();
    n->key=kstrdup("value");
    n->first=n2;

    n2=n;
    n=NetParse_NewNode();
    n->key=kstrdup("param");
    n->first=n2;

    lst=NetParse_AddNodeEnd(lst, n);
  }

  /* Wrap the parameter list and the method name in a methodCall element. */
  n2=n;
  n=NetParse_NewNode();
  n->key=kstrdup("params");
  n->first=lst;
  exp=n;

  n=NetParse_NewNode();
  n->text=kstrdup(ri->obj);

  n2=n;
  n=NetParse_NewNode();
  n->key=kstrdup("methodName");
  n->first=n2;
  n->next=exp;

  n2=n;
  n=NetParse_NewNode();
  n->key=kstrdup("methodCall");
  n->first=n2;

  /* Wrap the call in a query element with the appropriate namespace. */
  n2=n;
  n=NetParse_NewNode();
  n->key=kstrdup("query");
  n->first=n2;
  if(!bgbrpc)
    NetParse_AddAttr(n, "xmlns", "jabber:iq:rpc");
  else
    NetParse_AddAttr(n, "xmlns", "http://bgb-sys.sourceforge.net/bgb-rpc");

  /* Send the call as an IQ stanza tagged with a fresh sequence id. */
  tag=kalloc(16);
  kprints(tag, "rpc%d", jabberrpc_seq++);

  s=jabberrpc_encode_jid(ri);
  XMPP_SendIQ(s, "set", tag, n);
  NetParse_FreeNode(n);

  // s=kalloc(16384);
  // t=s;
  // t=kprints(t, "<?xml version=\"1.0\"?>\n");
  // t=NetParse_XML_PrintExpr(t, n);
  // t=kprints(t, "\n");

  // t=HttpNode_Post(ref, "text/xml", s, strlen(s));
  // kfree(s);

  // kprint("XML-RPC: got %s\n", t);

  // s=t;
  // exp=NetParse_XML_ParseExpr(&t);
  // if(exp)if(exp->key)if(!strcmp(exp->key, "?xml"))
  //   exp=NetParse_XML_ParseExpr(&t);

  /* Poll the network until the tagged response arrives. */
  exp=NULL;
  while(!exp)
  {
    exp=JabberRPC_CheckResults(tag);
    NET_Poll();
  }

  if(!exp)
  {
    kprint("PDLIB: XML-RPC: Parsed invalid response\n");
    return(NULL);
  }
  if(!exp->key)
  {
    kprint("PDLIB: XML-RPC: Parsed invalid response\n");
    return(NULL);
  }

  if(!strcmp(exp->key, "methodResponse"))
  {
    n=NetParse_FindKey(exp->first, "params");
    cur=n->first;
    p=XmlRpc_DecodeValue(cur->first);
    // NetParse_FreeNode(exp);
    return(p);
  }

  return(NULL);
}
void initialize(void)
{
  kprints("\n\n\nThe kernel has booted!\n\n\n");
}
// main program loop
void user_program(void)
{
  switch(state)
  {
    case INIT:              state_init();                break;
    case CONSTRUCT_ID:      state_construct_id();        break;
    case TEST_ID:           state_test_id();             break;
    case ELECT_SEED_TOP:    state_elect_seeder_top();    break;
    case ELECT_SEED_BOTTOM: state_elect_seeder_bottom(); break;
    case CHECK_SEEDER:      state_check_seeder();        break;
    case AWAIT_R_R:         state_await_r_r();           break;
    case RECRUIT_REFERENCE: state_recruit_reference();   break;
    case SAVE_DATA:         state_save_data();           break;
    case AWAIT_OTHERS:
      receive_message();
      if(rh_reset_c++ >= RH_RESET_MAX)
      {
        // if the seeder is not responsive, reset the rh state
        rh_reset();
        rh_reset_c = 0;
      }
      break;
    case ERROR_STATE:
      break;
  }

#ifdef DEBUG_LED
  // show the current role on the LED
  switch(role)
  {
    case ND:            LED_WHITE;  break;
    case SEEDER:        LED_RED;    break;
    case BOTTOM_SEEDER: LED_ORANGE; break;
    case BOT:           LED_GREEN;  break;
    default:            LED_BLUE;   break;
  }

  // overlay state-specific LED signals
  switch(state)
  {
    case INIT:
    case CONSTRUCT_ID:
    case TEST_ID:
      LED_TURQOISE;
      break;
    case ELECT_SEED_TOP:
    case ELECT_SEED_BOTTOM:
      break;
    case CHECK_SEEDER:
      if(counter % 50 >= 25) LED_BLUE;
      break;
    case SAVE_DATA:
      LED_OFF;
      break;
    case AWAIT_OTHERS:
      if(role != BOT) LED_OFF;
      break;
    case ERROR_STATE:
      LED_RED;
      _delay_ms(300);
      LED_ORANGE;
      _delay_ms(300);
      break;
    default:
      break;
  }
#endif

#ifdef DEBUG_MSG
  NEWLINE;
  kprints("id: ");
  kprinti(id);
  NEWLINE;
  kprints("state ");
  kprinti(state);
  NEWLINE;
  kprints("neighbours ");
  kprinti(neighbours->size);
  NEWLINE;

  struct neighbour *nd;
  struct list_node *cur;
  for(cur = neighbours->head; cur != 0; cur = cur->next)
  {
    nd = (struct neighbour *) cur->data;
    kprinti(nd->id);
  }
  NEWLINE;

  kprints("seeders ");
  kprinti(n_of_seeders());
  NEWLINE;

  struct seeder *s;
  for(cur = seeders_list()->head; cur != 0; cur = cur->next)
  {
    s = (struct seeder *) cur->data;
    kprinti(s->id);
  }
  NEWLINE;

  kprints("enable_tx ");
  kprinti(enable_tx);
  NEWLINE;
#endif

#ifndef DEBUG_MSG
  _delay_ms(5);
#endif
}
int system_call_implementation(void)
{
  register int schedule = 0;
  /*!< System calls may set this variable to 1. The variable is used as
       input to the scheduler to indicate if scheduling is necessary. */

  switch(SYSCALL_ARGUMENTS.rax)
  {
    case SYSCALL_PRINTS:
    {
      kprints((char*) (SYSCALL_ARGUMENTS.rdi));
      SYSCALL_ARGUMENTS.rax = ALL_OK;
      break;
    }

    case SYSCALL_PRINTHEX:
    {
      kprinthex(SYSCALL_ARGUMENTS.rdi);
      SYSCALL_ARGUMENTS.rax = ALL_OK;
      break;
    }

    case SYSCALL_DEBUGGER:
    {
      /* Enable the bochs iodevice and force a return to the debugger. */
      outw(0x8a00, 0x8a00);
      outw(0x8a00, 0x8ae0);
      SYSCALL_ARGUMENTS.rax = ALL_OK;
      break;
    }

    case SYSCALL_VERSION:
    {
      SYSCALL_ARGUMENTS.rax = KERNEL_VERSION;
      break;
    }

    case SYSCALL_CREATEPROCESS:
    {
      int process_number, thread_number;
      long int executable_number = SYSCALL_ARGUMENTS.rdi;
      struct prepare_process_return_value prepare_process_ret_val;

      /* Find a free entry in the process table. */
      for (process_number = 0;
           process_number < MAX_NUMBER_OF_PROCESSES &&
           process_table[process_number].threads > 0;
           process_number++)
      {
      }

      prepare_process_ret_val = prepare_process(
          executable_table[executable_number].elf_image,
          process_number,
          executable_table[executable_number].memory_footprint_size);

      if(0 == prepare_process_ret_val.first_instruction_address)
      {
        kprints("Error starting image\n");
      }

      process_table[process_number].parent =
        thread_table[cpu_private_data.thread_index].data.owner;

      thread_number = allocate_thread();
      thread_table[thread_number].data.owner = process_number;
      thread_table[thread_number].data.registers.integer_registers.rflags = 0x200;
      thread_table[thread_number].data.registers.integer_registers.rip =
        prepare_process_ret_val.first_instruction_address;

      process_table[process_number].threads += 1;

      SYSCALL_ARGUMENTS.rax = ALL_OK;

      thread_queue_enqueue(&ready_queue, thread_number);
      /*cpu_private_data.thread_index = thread_number;*/
      break;
    }

    case SYSCALL_TERMINATE:
    {
      int i;
      int owner_process = thread_table[cpu_private_data.thread_index].data.owner;
      int parent_process = process_table[owner_process].parent;

      /* Terminate the thread and decrement the owning process' thread
         count. */
      thread_table[cpu_private_data.thread_index].data.owner = -1;
      process_table[owner_process].threads -= 1;

      if(process_table[owner_process].threads < 1)
      {
        cleanup_process(owner_process);
      }

      for(i=0;
          i < MAX_NUMBER_OF_THREADS &&
          thread_table[i].data.owner != parent_process;
          i++)
      {
      }
      /*cpu_private_data.thread_index = i;*/
      /*thread_queue_dequeue(&ready_queue);*/

      schedule = 1;
      break;
    }

    /* Do not touch any lines above or including this line. */

    /* Add the implementation of more system calls here. */

    /* Do not touch any lines below or including this line. */

    default:
    {
      /* No system call defined. */
      SYSCALL_ARGUMENTS.rax = ERROR_ILLEGAL_SYSCALL;
    }
  }

  return schedule;
}
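/* A hypothetical user-level wrapper, shown only to illustrate the register
   convention implied by SYSCALL_ARGUMENTS above: call number in rax, first
   argument in rdi, result returned in rax. The entry mechanism (the syscall
   instruction, with its rcx/r11 clobbers) is an assumption here; the kernel's
   real user-space entry stub may use a different instruction. */
static inline long example_syscall_1(long number, long argument0)
{
  long result;
  __asm__ volatile("syscall"
                   : "=a"(result)
                   : "a"(number), "D"(argument0)
                   : "rcx", "r11", "memory");
  return result;
}

/* e.g. example_syscall_1(SYSCALL_PRINTS, (long) "hello from user space\n"); */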
/*! Copies an ELF image to memory and prepares a process.

    prepare_process does some checks to avoid that corrupt images get copied
    to memory. However, the checks are not as thorough as the checks in
    initialize.

    \return A prepare_process_return_value struct holding the first address
            of the process image and the address of the page table for the
            process. */
static struct prepare_process_return_value
prepare_process(const struct Elf64_Ehdr* elf_image
                /*!< Points to the ELF image to copy. */,
                const unsigned int process
                /*!< The index of the process that is to be created. */,
                unsigned long memory_footprint_size
                /*!< Holds the maximum amount of memory, in bytes, the image
                     is allowed to use. */)
{
  /* Get the address of the program header table. */
  int program_header_index;
  struct Elf64_Phdr* program_header =
    ((struct Elf64_Phdr*) (((char*) (elf_image)) + elf_image->e_phoff));
  unsigned long used_memory = 0;

  /* Allocate memory for the page table and for the process' memory. All of
     this is allocated in a single memory block. The memory block is set up
     so that it cannot be de-allocated via kfree. */
  long address_to_memory_block =
    kalloc(memory_footprint_size + 19*4*1024, process, ALLOCATE_FLAG_KERNEL);

  struct prepare_process_return_value ret_val = {0, 0};

  /* First check that we have enough memory. */
  if (0 >= address_to_memory_block)
  {
    /* No, we don't. */
    return ret_val;
  }

  ret_val.page_table_address = address_to_memory_block;

  {
    /* Create a page table for the process. */
    unsigned long* dst = (unsigned long*) address_to_memory_block;
    unsigned long* src = (unsigned long*) (kernel_page_table_root + 3*4*1024);
    register int i;

    /* Clear the first three frames (pml4, pdp and pd). */
    for(i=0; i<3*4*1024/8; i++)
    {
      *dst++ = 0;
    }

    /* Build the pml4 table. */
    dst = (unsigned long*) (address_to_memory_block);
    *dst = (address_to_memory_block+4096) | 7;

    /* Build the pdp table. */
    dst = (unsigned long*) (address_to_memory_block+4096);
    *dst = (address_to_memory_block+2*4096) | 7;

    /* Build the pd table. */
    dst = (unsigned long*) (address_to_memory_block+2*4096);
    for(i=0; i<16; i++)
    {
      *dst++ = (address_to_memory_block+(3+i)*4096) | 7;
    }

    /* Copy the rest of the kernel page table. */
    dst = (unsigned long*) (address_to_memory_block + 3*4*1024);
    for(i=0; i<(16*1024*4/8); i++)
    {
      *dst++ = *src++;
    }
  }

  /* Update the start of the block to be after the page table. */
  address_to_memory_block += 19*4*1024;

  /* Scan through the program header table and copy all PT_LOAD segments to
     memory. Perform checks at the same time. */
  for (program_header_index = 0;
       program_header_index < elf_image->e_phnum;
       program_header_index++)
  {
    if (PT_LOAD == program_header[program_header_index].p_type)
    {
      /* Calculate the destination address. */
      unsigned long* dst = (unsigned long *)
        (address_to_memory_block + used_memory);

      /* Check for odd things. */
      if (
          /* Check that the segment is contiguous. */
          (used_memory != program_header[program_header_index].p_vaddr) ||
          /* Check that the segment fits in memory. */
          (used_memory + program_header[program_header_index].p_memsz >
           memory_footprint_size) ||
          /* Check if the segment has an odd size. We require the segment
             size to be an even multiple of 8. */
          (0 != (program_header[program_header_index].p_memsz&7)) ||
          (0 != (program_header[program_header_index].p_filesz&7)))
      {
        /* Something went wrong. Panic. */
        while(1)
        {
          kprints("Kernel panic: Trying to create a process out of a corrupt executable image!");
        }
      }

      /* First copy p_filesz bytes from the image to memory. */
      {
        /* Calculate the source address. */
        unsigned long* src = (unsigned long *)
          (((char*) elf_image) + program_header[program_header_index].p_offset);
        unsigned long count = program_header[program_header_index].p_filesz/8;

        for(; count>0; count--)
        {
          *dst++=*src++;
        }
      }

      /* Then write p_memsz-p_filesz bytes of zeros to pad the segment. */
      {
        unsigned long count =
          (program_header[program_header_index].p_memsz -
           program_header[program_header_index].p_filesz)/8;

        for(; count>0; count--)
        {
          *dst++=0;
        }
      }

      /* Set the permission bits on the loaded segment. */
      update_memory_protection(ret_val.page_table_address,
                               program_header[program_header_index].p_vaddr +
                               address_to_memory_block,
                               program_header[program_header_index].p_memsz,
                               program_header[program_header_index].p_flags&7);

      /* Finally update the amount of used memory. */
      used_memory += program_header[program_header_index].p_memsz;
    }
  }

  /* Find out the address of the first instruction to be executed. */
  ret_val.first_instruction_address =
    address_to_memory_block + elf_image->e_entry;

  return ret_val;
}
void initialize(void)
{
  register int i;

  /* Loop over all threads in the thread table and reset the owner. */
  for(i=0; i<MAX_NUMBER_OF_THREADS; i++)
  {
    thread_table[i].data.owner=-1; /* -1 is an illegal process_table index.
                                      We use that to show that the thread
                                      is dormant. */
  }

  /* Loop over all processes in the process table and mark them as not
     executing. */
  for(i=0; i<MAX_NUMBER_OF_PROCESSES; i++)
  {
    process_table[i].threads=0; /* No executing process has less than 1
                                   thread. */
  }

  /* Initialize the ready queue. */
  thread_queue_init(&ready_queue);

  /* Initialize the list of blocked threads waiting for the keyboard. */
  thread_queue_init(&keyboard_blocked_threads);

  /* Calculate the number of pages. */
  memory_pages = memory_size/(4*1024);

  {
    /* Calculate the number of frames occupied by the kernel and executable
       images. */
    const register int k=first_available_memory_byte/(4*1024);

    /* Mark the pages that are used by the kernel or executable images as
       taken by the kernel (-2 in the owner field). */
    for(i=0; i<k; i++)
    {
      page_frame_table[i].owner=-2;
      page_frame_table[i].free_is_allowed=0;
    }

    /* Loop over all the remaining page frames and mark them as free
       (-1 in the owner field). */
    for(i=k; i<memory_pages; i++)
    {
      page_frame_table[i].owner=-1;
      page_frame_table[i].free_is_allowed=1;
    }

    /* Mark any unusable pages as taken by the kernel. */
    for(i=memory_pages; i<MAX_NUMBER_OF_FRAMES; i++)
    {
      page_frame_table[i].owner=-2;
      page_frame_table[i].free_is_allowed=0;
    }
  }

  /* Go through the linked list of executable images and verify that they
     are correct. At the same time build the executable_table. */
  {
    const struct executable_image* image;

    for (image=ELF_images_start; 0!=image; image=image->next)
    {
      unsigned long image_size;

      /* First calculate the size of the image. */
      if (0 != image->next)
      {
        image_size = ((char *) (image->next)) - ((char *) image) - 1;
      }
      else
      {
        image_size = ((char *) ELF_images_end) - ((char *) image) - 1;
      }

      /* Check that the image is an ELF image and that it is of the right
         type. */
      if (
          /* EI_MAG0 - EI_MAG3 have to be 0x7f 'E' 'L' 'F'. */
          (image->elf_image.e_ident[EI_MAG0] != 0x7f) ||
          (image->elf_image.e_ident[EI_MAG1] != 'E') ||
          (image->elf_image.e_ident[EI_MAG2] != 'L') ||
          (image->elf_image.e_ident[EI_MAG3] != 'F') ||
          /* Check that the image is a 64-bit image. */
          (image->elf_image.e_ident[EI_CLASS] != 2) ||
          /* Check that the image is a little endian image. */
          (image->elf_image.e_ident[EI_DATA] != 1) ||
          /* And that the version of the image format is correct. */
          (image->elf_image.e_ident[EI_VERSION] != 1) ||
          /* NB: We do not check the ABI or ABI version. We really should
             but currently those fields are not set properly by the build
             tools. They are both set to zero which means: System V ABI,
             third edition. However, the ABI used is clearly not
             System V :-) */
          /* Check that the image is executable. */
          (image->elf_image.e_type != 2) ||
          /* Check that the image is executable on AMD64. */
          (image->elf_image.e_machine != 0x3e) ||
          /* Check that the object format is correct. */
          (image->elf_image.e_version != 1) ||
          /* Check that the processor dependent flags are all reset. */
          (image->elf_image.e_flags != 0) ||
          /* Check that the length of the header is what we expect. */
          (image->elf_image.e_ehsize != sizeof(struct Elf64_Ehdr)) ||
          /* Check that the size of the program header table entry is what
             we expect. */
          (image->elf_image.e_phentsize != sizeof(struct Elf64_Phdr)) ||
          /* Check that the number of entries is reasonable. */
          (image->elf_image.e_phnum < 0) ||
          (image->elf_image.e_phnum > 8) ||
          /* Check that the entry point is within the image. */
          (image->elf_image.e_entry < 0) ||
          (image->elf_image.e_entry >= image_size) ||
          /* Finally, check that the program header table is within the
             image. */
          (image->elf_image.e_phoff > image_size) ||
          ((image->elf_image.e_phoff +
            image->elf_image.e_phnum * sizeof(struct Elf64_Phdr))
           > image_size))
      {
        /* There is something wrong with the image. */
        while (1)
        {
          kprints("Kernel panic! Corrupt executable image.\n");
        }
        continue;
      }

      /* Now check the program header table. */
      {
        int program_header_index;
        struct Elf64_Phdr* program_header =
          ((struct Elf64_Phdr*) (((char*) &(image->elf_image)) +
                                 image->elf_image.e_phoff));
        unsigned long memory_footprint_size = 0;

        for (program_header_index = 0;
             program_header_index < image->elf_image.e_phnum;
             program_header_index++)
        {
          /* First sanity check the entry. */
          if (
              /* Check that the segment is a type we can handle. */
              (program_header[program_header_index].p_type < 0) ||
              (!((program_header[program_header_index].p_type == PT_NULL) ||
                 (program_header[program_header_index].p_type == PT_LOAD) ||
                 (program_header[program_header_index].p_type == PT_PHDR))) ||
              /* Look more carefully into loadable segments. */
              ((program_header[program_header_index].p_type == PT_LOAD) &&
               /* Check if any flags that we can not handle are set. */
               (((program_header[program_header_index].p_flags & ~7) != 0) ||
                /* Check if sizes and offsets look sane. */
                (program_header[program_header_index].p_offset < 0) ||
                (program_header[program_header_index].p_vaddr < 0) ||
                (program_header[program_header_index].p_filesz < 0) ||
                (program_header[program_header_index].p_memsz < 0) ||
                /* Check if the segment has an odd size. We require the
                   segment size to be an even multiple of 8. */
                (0 != (program_header[program_header_index].p_memsz&7)) ||
                (0 != (program_header[program_header_index].p_filesz&7)) ||
                /* Check if the segment goes beyond the image. */
                ((program_header[program_header_index].p_offset +
                  program_header[program_header_index].p_filesz)
                 > image_size))))
          {
            while (1)
            {
              kprints("Kernel panic! Corrupt segment.\n");
            }
          }

          /* Check that all PT_LOAD segments are contiguous starting from
             address 0. Also, calculate the memory footprint of the
             image. */
          if (program_header[program_header_index].p_type == PT_LOAD)
          {
            if (program_header[program_header_index].p_vaddr !=
                memory_footprint_size)
            {
              while (1)
              {
                kprints("Kernel panic! Executable image has illegal memory layout.\n");
              }
            }

            memory_footprint_size +=
              program_header[program_header_index].p_memsz;
          }
        }

        executable_table[executable_table_size].memory_footprint_size =
          memory_footprint_size;
      }

      executable_table[executable_table_size].elf_image = &(image->elf_image);
      executable_table_size += 1;

      kprints("Found an executable image.\n");

      if (executable_table_size >= MAX_NUMBER_OF_PROCESSES)
      {
        while (1)
        {
          kprints("Kernel panic! Too many executable images found.\n");
        }
      }
    }
  }

  /* Check that some executable files actually were found. Also check that
     the thread structure is of the right size. The assembly code will break
     if it is not. Finally, initialize memory protection. You will implement
     memory protection in task A4. */
  if ((0 >= executable_table_size) || (1024 != sizeof(union thread)))
  {
    while (1)
    {
      kprints("Kernel panic! Can not boot.\n");
    }
  }

  initialize_memory_protection();
  initialize_ports();
  initialize_thread_synchronization();

  /* All sub-systems are now initialized. Kernel areas can now get the right
     memory protection. */
  {
    /* Use the kernel's ELF header. */
    struct Elf32_Phdr* program_header =
      ((struct Elf32_Phdr*) (((char*) (0x00100000)) +
                             ((struct Elf32_Ehdr*)0x00100000)->e_phoff));

    /* Traverse the program header. */
    short number_of_program_header_entries =
      ((struct Elf32_Ehdr*)0x00100000)->e_phnum;
    int i;

    for(i=0; i<number_of_program_header_entries; i++)
    {
      if (PT_LOAD == program_header[i].p_type)
      {
        /* Set protection on each segment. */
        update_memory_protection(kernel_page_table_root,
                                 program_header[i].p_vaddr,
                                 program_header[i].p_memsz,
                                 (program_header[i].p_flags&7) | PF_KERNEL);
      }
    }
  }

  /* Start running the first program in the executable table. */

  /* Use the ELF program header table and copy the right portions of the
     image to memory. This is done by prepare_process. */
  {
    struct prepare_process_return_value prepare_process_ret_val =
      prepare_process(executable_table[0].elf_image,
                      0,
                      executable_table[0].memory_footprint_size);

    if (0 == prepare_process_ret_val.first_instruction_address)
    {
      while (1)
      {
        kprints("Kernel panic! Can not start process 0!\n");
      }
    }

    /* Start executable program 0 as process 0. At this point, there are no
       processes so we can just grab entry 0 and use it. */
    process_table[0].parent=-1; /* We put -1 to indicate that there is no
                                   parent process. */
    process_table[0].threads=1;

    /* All processes should start with an allocated port with id zero. */
    if (-1 == allocate_port(0,0))
    {
      while(1)
      {
        kprints("Kernel panic! Can not initialize the IPC system!\n");
      }
    }

    /* Set the page table address. */
    process_table[0].page_table_root = prepare_process_ret_val.page_table_address;
    cpu_private_data.page_table_root = prepare_process_ret_val.page_table_address;

    /* We need a thread. We just take the first one as no threads are
       running or have been allocated at this point. */
    thread_table[0].data.owner=0; /* 0 is the index of the first process. */

    /* We reset all flags and enable interrupts. */
    thread_table[0].data.registers.integer_registers.rflags=0x200;

    /* And set the start address. */
    thread_table[0].data.registers.integer_registers.rip =
      prepare_process_ret_val.first_instruction_address;

    /* Finally we set the current thread. */
    cpu_private_data.thread_index = 0;
  }

  /* Set up the timer hardware to generate interrupts 200 times a second. */
  outb(0x43, 0x36);
  outb(0x40, 78);
  outb(0x40, 23);

  /* Set up the keyboard controller. */

  /* Empty the keyboard buffer. */
  {
    register unsigned char status_byte;
    do
    {
      status_byte=inb(0x64);
      if ((status_byte&3)==1)
      {
        inb(0x60);
      }
    } while((status_byte&0x3)!=0x0);
  }

  /* Change the command byte to enable interrupts. */
  outb(0x64, 0x20);
  {
    register unsigned char keyboard_controller_command_byte;

    {
      register unsigned char status_byte;
      do
      {
        status_byte=inb(0x64);
      } while((status_byte&3)!=1);
    }

    keyboard_controller_command_byte=inb(0x60);

    /* Enable keyboard interrupts. */
    keyboard_controller_command_byte|=1;

    kprints("Keyboard controller command byte:");
    kprinthex(keyboard_controller_command_byte);
    kprints("\n");

    outb(0x64, 0x60);
    outb(0x60, keyboard_controller_command_byte);

    /* Wait until the command is done. */
    {
      register unsigned char status_byte;
      do
      {
        status_byte=inb(0x64);
      } while((status_byte&0x2)!=0x0);
    }
  }

  /* Now we set up the interrupt controller to allow timer and keyboard
     interrupts. */
  outb(0x20, 0x11);
  outb(0xA0, 0x11);

  outb(0x21, 0x20);
  outb(0xA1, 0x28);

  outb(0x21, 1<<2);
  outb(0xA1, 2);

  outb(0x21, 1);
  outb(0xA1, 1);

  outb(0x21, 0xfc);
  outb(0xA1, 0xff);

  clear_screen();

  kprints("\n\n\nThe kernel has booted!\n\n\n");

  /* Now go back to the assembly language code and let the process run. */
}
void initialize(void)
{
  register int i;

  /* Loop over all threads in the thread table and reset the owner. */
  for(i=0; i<MAX_NUMBER_OF_THREADS; i++)
  {
    thread_table[i].data.owner=-1; /* -1 is an illegal process_table index.
                                      We use that to show that the thread
                                      is dormant. */
  }

  /* Loop over all processes in the process table and mark them as not
     executing. */
  for(i=0; i<MAX_NUMBER_OF_PROCESSES; i++)
  {
    process_table[i].threads=0; /* No executing process has less than 1
                                   thread. */
  }

  /* Initialize the ready queue. */
  thread_queue_init(&ready_queue);

  /* Go through the linked list of executable images and verify that they
     are correct. At the same time build the executable_table. */
  {
    const struct executable_image* image;

    for (image=ELF_images_start; 0!=image; image=image->next)
    {
      unsigned long image_size;

      /* First calculate the size of the image. */
      if (0 != image->next)
      {
        image_size = ((char *) (image->next)) - ((char *) image) - 1;
      }
      else
      {
        image_size = ((char *) ELF_images_end) - ((char *) image) - 1;
      }

      /* Check that the image is an ELF image and that it is of the right
         type. */
      if (
          /* EI_MAG0 - EI_MAG3 have to be 0x7f 'E' 'L' 'F'. */
          (image->elf_image.e_ident[EI_MAG0] != 0x7f) ||
          (image->elf_image.e_ident[EI_MAG1] != 'E') ||
          (image->elf_image.e_ident[EI_MAG2] != 'L') ||
          (image->elf_image.e_ident[EI_MAG3] != 'F') ||
          /* Check that the image is a 64-bit image. */
          (image->elf_image.e_ident[EI_CLASS] != 2) ||
          /* Check that the image is a little endian image. */
          (image->elf_image.e_ident[EI_DATA] != 1) ||
          /* And that the version of the image format is correct. */
          (image->elf_image.e_ident[EI_VERSION] != 1) ||
          /* NB: We do not check the ABI or ABI version. We really should
             but currently those fields are not set properly by the build
             tools. They are both set to zero which means: System V ABI,
             third edition. However, the ABI used is clearly not
             System V :-) */
          /* Check that the image is executable. */
          (image->elf_image.e_type != 2) ||
          /* Check that the image is executable on AMD64. */
          (image->elf_image.e_machine != 0x3e) ||
          /* Check that the object format is correct. */
          (image->elf_image.e_version != 1) ||
          /* Check that the processor dependent flags are all reset. */
          (image->elf_image.e_flags != 0) ||
          /* Check that the length of the header is what we expect. */
          (image->elf_image.e_ehsize != sizeof(struct Elf64_Ehdr)) ||
          /* Check that the size of the program header table entry is what
             we expect. */
          (image->elf_image.e_phentsize != sizeof(struct Elf64_Phdr)) ||
          /* Check that the number of entries is reasonable. */
          (image->elf_image.e_phnum < 0) ||
          (image->elf_image.e_phnum > 8) ||
          /* Check that the entry point is within the image. */
          (image->elf_image.e_entry < 0) ||
          (image->elf_image.e_entry >= image_size) ||
          /* Finally, check that the program header table is within the
             image. */
          (image->elf_image.e_phoff > image_size) ||
          ((image->elf_image.e_phoff +
            image->elf_image.e_phnum * sizeof(struct Elf64_Phdr))
           > image_size))
      {
        /* There is something wrong with the image. */
        while (1)
        {
          kprints("Kernel panic! Corrupt executable image.\n");
        }
        continue;
      }

      /* Now check the program header table. */
      {
        int program_header_index;
        struct Elf64_Phdr* program_header =
          ((struct Elf64_Phdr*) (((char*) &(image->elf_image)) +
                                 image->elf_image.e_phoff));
        unsigned long memory_footprint_size = 0;

        for (program_header_index = 0;
             program_header_index < image->elf_image.e_phnum;
             program_header_index++)
        {
          /* First sanity check the entry. */
          if (
              /* Check that the segment is a type we can handle. */
              (program_header[program_header_index].p_type < 0) ||
              (!((program_header[program_header_index].p_type == PT_NULL) ||
                 (program_header[program_header_index].p_type == PT_LOAD) ||
                 (program_header[program_header_index].p_type == PT_PHDR))) ||
              /* Look more carefully into loadable segments. */
              ((program_header[program_header_index].p_type == PT_LOAD) &&
               /* Check if any flags that we can not handle are set. */
               (((program_header[program_header_index].p_flags & ~7) != 0) ||
                /* Check if sizes and offsets look sane. */
                (program_header[program_header_index].p_offset < 0) ||
                (program_header[program_header_index].p_vaddr < 0) ||
                (program_header[program_header_index].p_filesz < 0) ||
                (program_header[program_header_index].p_memsz < 0) ||
                /* Check if the segment has an odd size. We require the
                   segment size to be an even multiple of 8. */
                (0 != (program_header[program_header_index].p_memsz&7)) ||
                (0 != (program_header[program_header_index].p_filesz&7)) ||
                /* Check if the segment goes beyond the image. */
                ((program_header[program_header_index].p_offset +
                  program_header[program_header_index].p_filesz)
                 > image_size))))
          {
            while (1)
            {
              kprints("Kernel panic! Corrupt segment.\n");
            }
          }

          /* Check that all PT_LOAD segments are contiguous starting from
             address 0. Also, calculate the memory footprint of the
             image. */
          if (program_header[program_header_index].p_type == PT_LOAD)
          {
            if (program_header[program_header_index].p_vaddr !=
                memory_footprint_size)
            {
              while (1)
              {
                kprints("Kernel panic! Executable image has illegal memory layout.\n");
              }
            }

            memory_footprint_size +=
              program_header[program_header_index].p_memsz;
          }
        }

        executable_table[executable_table_size].memory_footprint_size =
          memory_footprint_size;
      }

      executable_table[executable_table_size].elf_image = &(image->elf_image);
      executable_table_size += 1;

      kprints("Found an executable image.\n");

      if (executable_table_size >= MAX_NUMBER_OF_PROCESSES)
      {
        while (1)
        {
          kprints("Kernel panic! Too many executable images found.\n");
        }
      }
    }
  }

  /* Check that some executable files actually were found. Also check that
     the thread structure is of the right size. The assembly code will break
     if it is not. */
  if ((0 >= executable_table_size) || (1024 != sizeof(union thread)))
  {
    while (1)
    {
      kprints("Kernel panic! Can not boot.\n");
    }
  }

  /* Start running the first program in the executable table. */

  /* Use the ELF program header table and copy the right portions of the
     image to memory. This is done by prepare_process. */
  {
    struct prepare_process_return_value prepare_process_ret_val =
      prepare_process(executable_table[0].elf_image,
                      0,
                      executable_table[0].memory_footprint_size);

    if (0 == prepare_process_ret_val.first_instruction_address)
    {
      while (1)
      {
        kprints("Kernel panic! Can not start process 0!\n");
      }
    }

    /* Start executable program 0 as process 0. At this point, there are no
       processes so we can just grab entry 0 and use it. */
    process_table[0].parent=-1; /* We put -1 to indicate that there is no
                                   parent process. */
    process_table[0].threads=1;

    /* We need a thread. We just take the first one as no threads are
       running or have been allocated at this point. */
    thread_table[0].data.owner=0; /* 0 is the index of the first process. */

    /* We reset all flags and enable interrupts. */
    thread_table[0].data.registers.integer_registers.rflags=0x200;

    /* And set the start address. */
    thread_table[0].data.registers.integer_registers.rip =
      prepare_process_ret_val.first_instruction_address;

    /* Finally we set the current thread. */
    cpu_private_data.thread_index = 0;
    //cpu_private_data.ticks_left_of_time_slice = TIMESLICE_SIZE;
  }

  /* Set up the timer hardware to generate interrupts 200 times a second. */
  outb(0x43, 0x36);
  outb(0x40, 78);
  outb(0x40, 23);

  /* Now we set up the interrupt controller to allow timer interrupts. */
  outb(0x20, 0x11);
  outb(0xA0, 0x11);

  outb(0x21, 0x20);
  outb(0xA1, 0x28);

  outb(0x21, 1<<2);
  outb(0xA1, 2);

  outb(0x21, 1);
  outb(0xA1, 1);

  outb(0x21, 0xfe);
  outb(0xA1, 0xff);

  kprints("\n\n\nThe kernel has booted!\n\n\n");

  /* Now go back to the assembly language code and let the process run. */
}
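/* A worked check of the timer divisor programmed above, assuming the
   standard 1193182 Hz PIT input clock (the clock frequency itself is not
   stated in the source). The enum names below are illustrative only. */
enum
{
  PIT_INPUT_HZ  = 1193182,
  TIMER_HZ      = 200,
  PIT_DIVISOR   = (PIT_INPUT_HZ + TIMER_HZ/2) / TIMER_HZ, /* = 5966 = 0x174e */
  PIT_LOW_BYTE  = PIT_DIVISOR & 0xff,        /* 0x4e = 78, written first */
  PIT_HIGH_BYTE = (PIT_DIVISOR >> 8) & 0xff  /* 0x17 = 23, written second */
};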
int xmlrpc_post(HTTP_Resource *rsrc, HTTP_Con *con,
                char **type, char **data, int *len)
{
  char *s, *t;
  char *fcn;
  char **argn;
  long *args;
  int i;
  long l;
  char tbuf[256];
  NetParse_Node *exp, *cur, *n, *n2;

  s=*data;
  exp=NetParse_XML_ParseExpr(&s);

  /* Skip a leading "<?xml ...?>" declaration if present. */
  if(exp)if(exp->key)if(!strcmp(exp->key, "?xml"))
    exp=NetParse_XML_ParseExpr(&s);

  if(!exp)
  {
    *type=kstrdup("text/plain");
    *data=kstrdup("PDLIB: XML-RPC: Parsed invalid request\n");
    *len=strlen(*data);
    return(403);
  }
  if(!exp->key)
  {
    *type=kstrdup("text/plain");
    *data=kstrdup("PDLIB: XML-RPC: Parsed invalid request\n");
    *len=strlen(*data);
    return(403);
  }

  if(!strcmp(exp->key, "methodCall"))
  {
    /* Decode the method name and arguments. */
    argn=kalloc(8*sizeof(char *));
    args=kalloc(8*sizeof(long));
    i=0;

    n=NetParse_FindKey(exp->first, "methodName");
    fcn=n->first->text;

    n=NetParse_FindKey(exp->first, "params");
    cur=n->first;
    while(cur)
    {
      XmlRpc_DecodeValue(cur->first, &argn[i], &args[i]);
      i++;
      cur=cur->next;
    }
    argn[i++]=NULL;

    /* Dispatch the call to the exported function. */
    l=NET_CallExport(fcn, &s, args, argn);

    if(!strcmp(s, "NoFunc"))
    {
      sprintf(tbuf, "PDLIB: XML-RPC: No Function '%s'\n", fcn);
      *type=kstrdup("text/plain");
      *data=kstrdup(tbuf);
      *len=strlen(*data);
      return(403);
    }

    /* Encode the result as <methodResponse><params><param><value>. */
    n=XmlRpc_EncodeValue(s, l);

    n2=n;
    n=NetParse_NewNode();
    n->key=kstrdup("value");
    n->first=n2;

    n2=n;
    n=NetParse_NewNode();
    n->key=kstrdup("param");
    n->first=n2;

    n2=n;
    n=NetParse_NewNode();
    n->key=kstrdup("params");
    n->first=n2;

    n2=n;
    n=NetParse_NewNode();
    n->key=kstrdup("methodResponse");
    n->first=n2;

    /* Print the response tree as XML. */
    s=kalloc(16384);
    t=s;
    t=kprints(t, "<?xml version=\"1.0\"?>\n");
    // t=kprints(t, "<!-- test -->\n");
    t=NetParse_XML_PrintExpr(t, n);
    t=kprints(t, "\n");

    *type=kstrdup("text/xml");
    *data=s;
    *len=strlen(s);
    return(200);
  }

  sprintf(tbuf, "PDLIB: XML-RPC: Unknown request type '%s'\n", exp->key);
  *type=kstrdup("text/plain");
  *data=kstrdup(tbuf);
  *len=strlen(*data);
  return(403);
}