/*
 * Bring up the UDP tty device: initialize the SMAP driver and the UDP
 * layer, then create the mutex serializing tty output.
 *
 * Returns 0 on success and a negative value on failure (smap_init's
 * code is negated; udp_init already returns a negative code).
 */
static int tty_init(iop_device_t *device)
{
	int rc;

	rc = smap_init();
	if (rc != 0)
		return -rc;

	rc = udp_init();
	if (rc < 0)
		return rc;

	tty_sema = CreateMutex(IOP_MUTEX_UNLOCKED);
	if (tty_sema < 0)
		return -1;

	return 0;
}
/*
 * Initialize the SMAP hardware and register it with the lwIP stack
 * using the given IP address, netmask and gateway, then mark it as
 * the default interface and bring it up.
 *
 * Returns 1 (true) on success, 0 if the low-level driver failed.
 */
static inline int SMapInit(IPAddr IP, IPAddr NM, IPAddr GW, int argc, char *argv[])
{
	if (smap_init(argc, argv) != 0)
		return 0;

	dbgprintf("SMapInit: SMap initialized\n");

	/* Register the interface with the stack and activate it. */
	netif_add(&NIF, &IP, &NM, &GW, NULL, &SMapIFInit, tcpip_input);
	netif_set_default(&NIF);
	netif_set_up(&NIF);

	dbgprintf("SMapInit: NetIF added to ps2ip\n");

	/* 1 (true) indicates success. */
	return 1;
}
//------------------------------------------------------------------------- int _start(int argc, char *argv[]) { // Init SMAP if (smap_init(&g_param.eth_addr_src[0]) != 0) return MODULE_NO_RESIDENT_END; // Does ARP request and wait reply to get server MAC address arp_init(&g_param); #ifdef UDPTTY // Init UDP tty ttyInit(&g_param); #endif // Init TCPIP layer tcp_init(&g_param); tcp_connect(); return MODULE_RESIDENT_END; }
/*
** init the vmm area
**
** Carves a fixed-size working area for the VMM out of the top of the
** physical memory described by the multiboot info, lays each component
** out back-to-back inside it, loads the relocatable vmm ELF module,
** and finally switches the global 'info' pointer into the new area.
**
** Panics when the BIOS memory map or the expected modules are missing,
** or when there is not enough memory for the fixed layout.
*/
void pmem_init(mbi_t *mbi)
{
   info_data_t *info_r;
   size_t      vmm_elf_sz, smap_sz;
   size_t      pool_sz, pool_opt, pool_desc_sz;
   pg_cnt_t    vmm_pg, vm_pg;
   offset_t    fixed, area;
   module_t    *mod;
   smap_t      *smap;
   vmm_t       *vmm;
   vm_t        *vm;

   if(!(mbi->flags & MBI_FLAG_MMAP))
      panic("no bios smap found");

   if(mbi->mods_count != 2)
      panic("no module found");

   /* the vmm ELF is the second module in the multiboot list */
   mod  = (module_t*)((offset_t)mbi->mods_addr + sizeof(module_t));
   smap = &info->vm.dev.mem.smap;
   vmm  = &info->vmm;
   vm   = &info->vm;

   /*
   ** 1 - compute some sizes
   */
   smap_parse(mbi, smap, &info->area.end, &info->hrd.mem.top);
   pmem_pg_predict(&vmm_pg, &vm_pg);

   smap_sz    = smap->nr*sizeof(smap_e_t);
   vmm_elf_sz = elf_module_load_size(mod);
   pool_sz    = pmem_vm_pg_pool_size(&vm_pg);

   if(!page_aligned(info->area.end))
      info->area.end = page_align(info->area.end);

   /* optional command-line knob to grow the page pool */
   if(mbi_get_opt(mbi, mod, "pool", pmem_pool_opt_hdl, (void*)&pool_opt))
   {
      debug(PMEM, "increasing pool sz by %D*PAGE_SIZE\n", pool_opt);
      pool_sz += pool_opt*PAGE_SIZE;
   }

   pool_desc_sz = sizeof(pool_pg_desc_t)*(pool_sz/PAGE_SIZE);

   /* total fixed footprint of everything placed into the vmm area */
   fixed = (VMM_MIN_STACK_SIZE
            + pool_sz
            + pmem_vmm_pg_size(&vmm_pg)
            + pmem_vm_pg_size(&vm_pg)
            + sizeof(vmc_t)
#ifdef CONFIG_HAS_NET
            + net_mem_size_arch()
#endif
            + sizeof(vmm_sgmem_t)
            + vmm_elf_sz
            + sizeof(long)
            + smap_sz
            + pool_desc_sz
            + sizeof(info_data_t));

   info->area.start = page_align(info->area.end - fixed);

   if(info->area.start >= info->area.end)
      panic("not enough memory: end 0x%x fixed 0x%x\n", info->area.end, fixed);

   info->area.size = info->area.end - info->area.start;
   memset((void*)info->area.start, 0, info->area.size);

   /*
   ** 2 - init vmm area
   **
   ** NOTE(review): the order of the assignments below defines the
   ** memory layout; each step consumes 'area' sequentially.
   */
   /* strictly aligned */
   area  = info->area.start;
   area += VMM_MIN_STACK_SIZE;
   vmm->stack_bottom = area;

   /* vm GPR save area sits just below the vmm stack bottom */
   vm->cpu.gpr = (gpr64_ctx_t*)vmm->stack_bottom - 1;

   vmm->pool.addr = area;
   vmm->pool.sz   = pool_sz;
   area += pool_sz;

   area = pmem_vmm_pg_alloc(vmm, area, &vmm_pg);
   area = pmem_vm_pg_alloc(vm, area, &vm_pg);

   /* page and 16B aligned */
   vm->cpu.vmc = (vmc_t*)area;
   area += sizeof(vmc_t);

#ifdef CONFIG_HAS_NET
   /* 16b aligned */
   area = net_mem_init(area);
#endif

   /* 8B aligned */
   vmm->cpu.sg = (vmm_sgmem_t*)area;
   area += sizeof(vmm_sgmem_t);

   /* aligning not required */
   area = long_align_next(area);
   vmm->base = area;
   area += vmm_elf_sz;

   smap->raw = (uint8_t*)area;
   area += smap_sz;

   vmm->pool.all = (pool_pg_desc_t*)area;
   area += pool_desc_sz;

   info_r = (info_data_t*)area;
   area += sizeof(info_data_t);

   /*
   ** 3 - finish setup
   */
   vmm->size  = vmm_elf_sz;
   vmm->entry = vmm->base + elf_module_entry(mod);
   elf_module_load_relocatable(mod, vmm->base);

   /* loaded vmm starts with 'info_data' pointer */
   *(info_data_t**)vmm->base = info_r;

   smap_init(mbi, smap, info->area.start);
   pool_init();

   /* relocate the global info structure into the vmm area */
   memcpy((void*)info_r, (void*)info, sizeof(info_data_t));
   info = info_r;

#ifdef CONFIG_PMEM_DBG
   show_vmm_mem_map();
#endif

   debug_warning();

   /* read IA32_VMX_BASIC to check:
      - memory types for vmcs and data pointed to by pointers in the vmcs
      - vmcs should be in cache coherent regions
      - msr/io bitmaps should be in write back regions

      XXX: fix pmem.c/vmem.c to respect that
      mmio memory should be mapped to uncachable or pte should be set PCD/PWT
   */
}
int main() { struct SMAP* map; int i, rc, ret; pthread_t ntid; struct thread_args *thr_arg; struct PAIR pair; for (i = 0; i < LOOP_TIMES *2; i++) { sprintf(buf[i], "%07d", i); } map = smap_init(LOOP_TIMES*2, DEFAULT_LOAD_FACTOR, 128, LOOP_TIMES/100, 1); if (map == NULL) printf("smap_init failed! \n"); for (i = 0; i < LOOP_TIMES; i++) { if (i%2) SMAP_SET_NUM_PAIR(&pair, i, buf[i], 8); else SMAP_SET_STR_PAIR(&pair, buf[i], 7, buf[i], 8); rc = smap_put(map, &pair, 1); if (rc < 0){ printf("put: i: %d, error: %d\n", i, rc); exit(1); } } nconn = 0; for (i = 0; i < MAXTHEADS; i++) { file[i].f_flags = 0; } for (i = 0; i < MAXTHEADS; i++) { if ((thr_arg = (struct thread_args *)malloc(sizeof(struct thread_args))) == NULL) { perror("malloc"); exit(1); } thr_arg->fptr = &file[i]; thr_arg->map = map; file[i].f_flags = F_CONNECTING; ret = pthread_create(&ntid, NULL, getmap, (void *)thr_arg); if (ret != 0) { perror("pthread_create"); exit(1); } nconn++; file[i].f_tid = ntid; } if ((thr_arg = (struct thread_args *)malloc(sizeof(struct thread_args))) == NULL) { perror("malloc"); exit(1); } thr_arg->fptr = &file[i]; thr_arg->map = map; file[i].f_flags = F_CONNECTING; ret = pthread_create(&ntid, NULL, insert_map, (void *)thr_arg); nconn++; i++; if ((thr_arg = (struct thread_args *)malloc(sizeof(struct thread_args))) == NULL) { perror("malloc"); exit(1); } thr_arg->fptr = &file[i]; thr_arg->map = map; file[i].f_flags = F_CONNECTING; ret = pthread_create(&ntid, NULL, del_map, (void *)thr_arg); nconn++; while (nconn != 0) { pthread_mutex_lock(&ndone_mutex); while(ndone == 0) pthread_cond_wait(&ndone_cond, &ndone_mutex); for (i = 0; i < MAXTHEADS+2; i++) { if (file[i].f_flags & F_DONE) { pthread_join(file[i].f_tid, NULL); //file[i].f_tid = 0; file[i].f_flags = 0; /* clears F_DONE */ ndone--; nconn--; } } pthread_mutex_unlock(&ndone_mutex); } return 0; }