sexpr read_directory (const char *p)
{
    sexpr r = sx_end_of_list;
    unsigned int l = 0, s = 0, map_s, mapd_l;

    /* count path separators to size the per-component pointer map */
    while (p[l])
    {
        if (p[l] == '/') s++;
        l++;
    }
    s++;
    l++;

    map_s  = sizeof (char *) * s;
    mapd_l = l;

    /* use stack buffers when small enough, aalloc()/afree() otherwise */
    if (map_s < STACK_BUFFER_SIZE)
    {
        char buf_map [STACK_BUFFER_SIZE];

        if (mapd_l < STACK_BUFFER_SIZE)
        {
            char buf_mapd [STACK_BUFFER_SIZE];
            r = read_directory_w (p, (char **)buf_map, buf_mapd);
        }
        else
        {
            char *mapd = aalloc (mapd_l);
            r = read_directory_w (p, (char **)buf_map, mapd);
            afree (mapd_l, mapd);
        }
    }
    else
    {
        char **map = aalloc (map_s);

        if (mapd_l < STACK_BUFFER_SIZE)
        {
            char buf_mapd [STACK_BUFFER_SIZE];
            r = read_directory_w (p, map, buf_mapd);
        }
        else
        {
            char *mapd = aalloc (mapd_l);
            r = read_directory_w (p, map, mapd);
            afree (mapd_l, mapd);
        }

        afree (map_s, map);
    }

    return r;
}
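read_directory above sizes two scratch areas and takes each from the stack when it fits under STACK_BUFFER_SIZE, falling back to aalloc()/afree() otherwise. A minimal sketch of that pattern in isolation, assuming only the aalloc()/afree() pair and STACK_BUFFER_SIZE from the snippet above; with_scratch and its callback are hypothetical names:

/* Hypothetical helper: run fn() on a scratch buffer of the requested size,
 * taken from the stack when small enough and from aalloc() otherwise.
 * afree() is assumed to take the original size back, as in the snippet above. */
static void with_scratch (unsigned int size, void (*fn) (char *, unsigned int))
{
    if (size < STACK_BUFFER_SIZE)
    {
        char buf [STACK_BUFFER_SIZE];
        fn (buf, size);
    }
    else
    {
        char *buf = aalloc (size);
        fn (buf, size);
        afree (size, buf);
    }
}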
/*
 * Get ethernet address and compute checksum to be sure
 * that there is a board at this address.
 */
int eth_init()
{
    uint16 checksum, sum;
    register int i;

    for (i = 0; i < 6; i++)
        eth_myaddr[i] = in_byte(NE_BASEREG + i);

    sum = 0;
    for (i = 0x00; i <= 0x0B; i++)
        sum += in_byte(NE_BASEREG + i);
    for (i = 0x0E; i <= 0x0F; i++)
        sum += in_byte(NE_BASEREG + i);
    checksum = in_byte(NE_BASEREG + 0x0C) | (in_byte(NE_BASEREG + 0x0D) << 8);
    if (sum != checksum)
        return 0;

    /* initblock, tmd, and rmd should be 8 byte aligned ! */
    initblock = (initblock_t *) aalloc(sizeof(initblock_t), 8);
    tmd = (tmde_t *) aalloc(sizeof(tmde_t), 8);
    rmd = (rmde_t *) aalloc(NRCVRING * sizeof(rmde_t), 8);

    eth_reset();
    return 1;
}
char *fs_list(uint64_t dir, int entry)
{
    struct msg *msg;
    char *name;

    msg = aalloc(sizeof(struct msg) + sizeof(uint32_t), PAGESZ);
    if (!msg) return NULL;
    msg->source = RP_CONS(getpid(), 0);
    msg->target = dir;
    msg->length = sizeof(uint32_t);
    msg->port   = PORT_LIST;
    msg->arch   = ARCH_NAT;
    ((uint32_t*) msg->data)[0] = entry;
    if (msend(msg)) return NULL;
    msg = mwait(PORT_REPLY, dir);

    if (msg->length == 0) {
        free(msg);
        return NULL;
    }

    name = strdup((const char*) msg->data);

    free(msg);
    return name;
}
size_t rp_read(uint64_t file, void *buf, size_t size, uint64_t offset)
{
    struct msg *msg;

    msg = aalloc(sizeof(struct msg) + sizeof(uint64_t) + sizeof(uint32_t), PAGESZ);
    if (!msg) return 0;
    msg->source = RP_CURRENT_THREAD;
    msg->target = file;
    msg->length = sizeof(uint64_t) + sizeof(uint32_t);
    msg->port   = PORT_READ;
    msg->arch   = ARCH_NAT;
    ((uint64_t*) msg->data)[0] = offset;
    /* the 32-bit size follows the 64-bit offset, i.e. at byte offset 8 */
    ((uint32_t*) msg->data)[2] = size;
    if (msend(msg)) return 0;
    msg = mwait(PORT_REPLY, file);

    if (size > msg->length) {
        size = msg->length;
    }

    if (size) memcpy(buf, msg->data, size);

    free(msg);
    return size;
}
// Wrapper for aalloc()
void* FsAalloc(size_t size)
{
    void* mem = aalloc(size);

    if (mem == NULL)
        FsError(ENOMEM);

    return mem;
}
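A hedged usage sketch for the wrapper above: since FsAalloc() already reports ENOMEM through FsError(), callers only need a single NULL check. The structure and function names below are placeholders, not part of the original file system code:

/* Illustrative only: allocate a small handle through FsAalloc(). */
struct FsDirHandle { const char* path; size_t pos; };

struct FsDirHandle* FsOpenDirHandle(const char* path)
{
    struct FsDirHandle* dir = FsAalloc(sizeof(struct FsDirHandle));
    if (dir == NULL)
        return NULL;   /* FsError(ENOMEM) has already been raised */
    dir->path = path;
    dir->pos = 0;
    return dir;
}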
int rp_share(uint64_t rp, void *buf, size_t size, uint64_t offset, int prot)
{
    struct msg *msg;
    int err;

    /* check alignment */
    if ((uintptr_t) buf % PAGESZ) {
        return 1;
    }

    msg = aalloc(PAGESZ + size, PAGESZ);
    if (!msg) return 1;
    msg->source = RP_CURRENT_THREAD;
    msg->target = rp;
    msg->length = PAGESZ - sizeof(struct msg) + size;
    msg->port   = PORT_SHARE;
    msg->arch   = ARCH_NAT;
    ((uint64_t*) msg->data)[0] = offset;
    page_self(buf, &msg->data[PAGESZ - sizeof(struct msg)], size);
    page_prot(&msg->data[PAGESZ - sizeof(struct msg)], size, prot);
    if (msend(msg)) return 1;
    msg = mwait(PORT_REPLY, rp);

    if (msg->length != 1) {
        err = 1;
    }
    else {
        err = msg->data[0];
    }

    free(msg);
    return err;
}
void pkt_init()
{
    if (pool == (packet_t *)0)
        pool = (packet_t *) aalloc(PKT_POOLSIZE * sizeof(packet_t), 0);
    bzero(pool, PKT_POOLSIZE * sizeof(packet_t));
    last = pool;
}
int msendb(uint64_t target, uint8_t port)
{
    struct msg *msg;

    msg = aalloc(sizeof(struct msg), PAGESZ);
    if (!msg) return 1;
    msg->source = RP_CONS(getpid(), 0);
    msg->target = target;
    msg->length = 0;
    msg->port   = port;
    msg->arch   = ARCH_NAT;

    return msend(msg);
}
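msendb() above is a convenience for zero-payload notifications: it builds the page-aligned message header and hands ownership to msend(). A hedged usage sketch, reusing the PORT_SYNC constant that appears in rp_sync() further down; rp_poke is a hypothetical name:

/* Illustrative only: fire-and-forget sync request that does not wait for
 * the PORT_REPLY message. Returns msendb()'s status (0 on success). */
int rp_poke(uint64_t file)
{
    return msendb(file, PORT_SYNC);
}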
static void addBucket(SymUnion *st)
{
    SymUnion *bckt  = (SymUnion *) aalloc(ALIGN, sizeof(Bucket));
    SymUnion *scoop = bckt + 1;

    /* link the new bucket into the list right after st */
    bckt->hdr.prev = st;
    bckt->hdr.next = st->hdr.next;
    bckt->hdr.free = scoop;
    st->hdr.next->hdr.prev = bckt;
    st->hdr.next = bckt;

    /* the first entry past the header tracks the remaining free slots */
    scoop->entry.refs = 0;
    scoop->entry.stat.free.ptr = NULL;
    scoop->entry.stat.free.count = BSIZE - 1;
}
int event(rp_t rp, const char *value)
{
    struct msg *msg;

    if (!value) value = "";

    msg = aalloc(sizeof(struct msg) + strlen(value) + 1, PAGESZ);
    if (!msg) return 1;
    msg->source = RP_CONS(getpid(), 0);
    msg->target = rp;
    msg->length = strlen(value) + 1;
    msg->action = ACTION_EVENT;
    msg->arch   = ARCH_NAT;
    strcpy((char*) msg->data, value);

    return msend(msg);
}
int rp_sync(uint64_t file)
{
    struct msg *msg;

    msg = aalloc(sizeof(struct msg), PAGESZ);
    if (!msg) return 1;
    msg->source = RP_CURRENT_THREAD;
    msg->target = file;
    msg->length = 0;
    msg->port   = PORT_SYNC;
    msg->arch   = ARCH_NAT;
    if (msend(msg)) return 1;
    msg = mwait(PORT_REPLY, file);

    free(msg);
    return 0;
}
void *load_exec(const char *name)
{
    int fd;
    uint64_t size;
    char *path;
    void *image;

    /* attempt to find requested file */
    if (name[0] == '/' || name[0] == '@') {
        path = strdup(name);
    }
    else {
        path = strvcat(getenv("PATH"), "/", name, NULL);
    }

    fd = ropen(-1, fs_find(path), STAT_READER);
    free(path);

    if (fd < 0 || !rp_type(fd_rp(fd), "file")) {
        /* file not found */
        if (fd >= 0) close(fd);
        return NULL;
    }
    else {
        /* read whole file into buffer */
        size = rp_size(fd_rp(fd));

        if (!size) {
            close(fd);
            return NULL;
        }

        image = aalloc(size, PAGESZ);

        if (!image) {
            close(fd);
            return NULL;
        }

        if (rp_read(fd_rp(fd), image, size, 0) != size) {
            free(image);
            close(fd);
            return NULL;
        }

        close(fd);
        return image;
    }
}
sexpr graph_create()
{
    static struct memory_pool pool =
        MEMORY_POOL_INITIALISER(sizeof (struct graph));
    struct graph *gr;

    if (!initialised)
    {
        graph_initialise ();
    }

    gr = (struct graph *) get_pool_mem(&pool);

    gr->type       = graph_type_identifier;
    gr->node_count = 0;
    gr->nodes      = (struct graph_node **)aalloc (get_chunked_node_size(0));
    gr->on_free    = (void*)0;

    gc_base_items++;

    return (sexpr)gr;
}
/*
** alloc_map - allocate a "struct mapping". If we run short then we allocate
**             some extra memory to hold some more. When we first started
**             up we allocated a reasonably large chunk of the things so
**             we shouldn't have to allocate extra very often.
*/
static struct mapping *
alloc_map()
{
    struct mapping * mp;

    if ((mp = mapfree) == NILMAP)
    {
        DPRINTF(0, ("alloc_map: allocating %d mapping structs\n", NMAPS));
        mapfree = (struct mapping *) aalloc((vir_bytes) NMAPS_BYTES, 0);
        /* Make the linked list */
        for (mp = mapfree; mp < mapfree + NMAPS - 1; mp++)
            mp->mp_next = mp + 1;
        mapfree[NMAPS - 1].mp_next = NILMAP;
        mp = mapfree;
    }
    assert(mp != NILMAP);
    mapfree = mapfree->mp_next;
    mp->mp_next = 0;
    return mp;
}
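The comment above describes a free-list scheme: a chunk of NMAPS structs is carved up once and threaded through mp_next, so allocation is normally just a list pop. The matching release path is not part of this snippet; a minimal sketch of what it would look like under the same conventions (free_map is a hypothetical name):

/* Hypothetical counterpart to alloc_map(): push a mapping back onto the
 * free list instead of returning memory to the system. */
static void
free_map(mp)
struct mapping * mp;
{
    assert(mp != NILMAP);
    mp->mp_next = mapfree;
    mapfree = mp;
}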
void __rcall_wrapper(struct msg *msg)
{
    struct vfs_obj *file;
    struct msg *reply;
    char *args;
    char *rets;

    if (!_di_rcall) {
        merror(msg);
        return;
    }

    file = vfs_get_index(RP_INDEX(msg->target));
    if (!file) {
        merror(msg);
        return;
    }

    args = (char*) msg->data;
    rets = _di_rcall(msg->source, file, args);

    if (!rets) {
        merror(msg);
        return;
    }

    reply = aalloc(sizeof(struct msg) + strlen(rets) + 1, PAGESZ);
    if (!reply) {
        /* allocation failed: report an error instead of replying */
        free(rets);
        merror(msg);
        return;
    }
    reply->source = msg->target;
    reply->target = msg->source;
    reply->length = strlen(rets) + 1;
    reply->port   = PORT_REPLY;
    reply->arch   = ARCH_NAT;
    strcpy((char*) reply->data, rets);

    free(rets);
    free(msg);

    msend(reply);
}
struct graph_node *graph_add_node(sexpr sx, sexpr label)
{
    struct graph *gr = (struct graph *)sx_pointer(sx);
    static struct memory_pool pool =
        MEMORY_POOL_INITIALISER(sizeof (struct graph_node));
    struct graph_node *node = (struct graph_node *) get_pool_mem(&pool);
    unsigned int size_before = get_chunked_node_size (gr->node_count),
                 size_after  = get_chunked_node_size (gr->node_count + 1);

    if (size_before != size_after)
    {
        gr->nodes = (struct graph_node **)
            arealloc (size_before, gr->nodes, size_after);
    }

    node->edge_count = 0;
    node->edges = (struct graph_edge **)aalloc (get_chunked_edge_size(0));
    node->label = label;

    gr->nodes[gr->node_count] = node;
    gr->node_count++;

    return node;
}
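graph_add_node() only calls arealloc() when get_chunked_node_size() changes, which implies the node array grows in fixed-size chunks rather than per element. A hedged sketch of what such a chunk-rounding helper typically looks like; the granularity of 32 is an assumption for illustration, not the library's actual value:

#define GRAPH_NODE_CHUNK_SKETCH 32  /* assumed granularity, illustration only */

static unsigned int get_chunked_node_size_sketch (unsigned int count)
{
    /* round the element count up to the next chunk boundary and convert to
     * bytes; consecutive counts map to the same size until a boundary is
     * crossed, so reallocation only happens at chunk boundaries */
    unsigned int chunks =
        (count + GRAPH_NODE_CHUNK_SKETCH) / GRAPH_NODE_CHUNK_SKETCH;
    return chunks * GRAPH_NODE_CHUNK_SKETCH
           * (unsigned int)sizeof (struct graph_node *);
}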
struct exec_context *execute(unsigned int options, char **command,
                             char **environment)
{
    static struct memory_pool pool =
        MEMORY_POOL_INITIALISER(sizeof(struct exec_context));
    struct exec_context *context = (struct exec_context *)get_pool_mem(&pool);
    struct io *proc_stdout_in, *proc_stdout_out, *proc_stdin_in, *proc_stdin_out;

    context->in  = (struct io *)0;
    context->out = (struct io *)0;

    if ((options & EXEC_CALL_NO_IO) == 0)
    {
        net_open_loop(&proc_stdout_in, &proc_stdout_out);
        net_open_loop(&proc_stdin_in, &proc_stdin_out);
    }

    context->status = ps_running;

    if (command == (char **)0)
    {
        context->pid = -1;
    }
    else
    {
        char *av;
        int alength = 0, elength = 0;
        char **argv = command;
        char *envx = (char *)0;
        char *lastbs;
        char *nq;
        int i, j, argvsize, envvsize, nqsize;
        STARTUPINFO s;
        PROCESS_INFORMATION p;

        for (i = 0; command[i]; i++);
        i++;

        argvsize = sizeof (char *) * i;
        argv = aalloc (argvsize);
        nqsize = i;
        nq = aalloc (nqsize);

        for (i = 0; command[i]; i++)
        {
            argv[i] = command[i];
        }
        argv[i] = (char *)0;

        for (i = 0, lastbs = argv[0]; argv[0][i]; i++)
        {
            if (argv[0][i] == '\\')
            {
                lastbs = argv[0] + i + 1;
            }
        }
        argv[0] = lastbs;

        for (i = 0; argv[i]; i++)
        {
            nq[i] = 0;

            for (j = 0; argv[i][j]; j++)
            {
                if ((argv[i][j] == ' ') || (argv[i][j] == '\\'))
                {
                    nq[i] = 1;
                }
                if (argv[i][j] == '"')
                {
                    alength++;
                }
                alength++;
            }

            if (nq[i])
            {
                /* one for the trailing space, two for the quotes */
                alength += 3;
            }
            else
            {
                alength++;
            }
        }

        av = aalloc (alength);
        alength = 0;

        for (i = 0; argv[i]; i++)
        {
            if (alength > 0) av[alength-1] = ' ';

            if (nq[i])
            {
                av[alength] = '"';
                alength++;
            }

            for (j = 0; argv[i][j]; j++)
            {
                if (argv[i][j] == '"')
                {
                    av[alength] = '\\';
                    alength++;
                }

                av[alength] = argv[i][j];
                alength++;
            }

            if (nq[i])
            {
                av[alength] = '"';
                alength++;
            }

            av[alength] = 0;
            alength++;
        }

        if (environment != (char **)0)
        {
            for (i = 0; environment[i]; i++)
            {
                for (j = 0; environment[i][j]; j++)
                {
                    elength++;
                }
                elength++;
            }

            envvsize = elength + 1;
            envx = aalloc (envvsize);
            elength = 0;

            for (i = 0; environment[i]; i++)
            {
                for (j = 0; environment[i][j]; j++)
                {
                    envx[elength] = environment[i][j];
                    elength++;
                }
                envx[elength] = 0;
                elength++;
            }
            envx[elength] = 0;
        }

        memset (&s, 0, sizeof (s));
        s.cb = sizeof (s);
        memset (&p, 0, sizeof (p));

        if (((options & EXEC_CALL_NO_IO) == 0) &&
            (proc_stdin_out != (void *)0) && (proc_stdout_in != (void *)0))
        {
            s.dwFlags |= STARTF_USESTDHANDLES;
            s.hStdInput  = proc_stdin_out->handle;
            s.hStdOutput = proc_stdout_in->handle;
            s.hStdError  = proc_stdout_in->handle;
        }

        if (CreateProcessA(command[0], av, (void *)0, (void *)0, FALSE,
                           /* CREATE_NEW_CONSOLE */ 0, envx, (void *)0, &s, &p))
        {
            context->pid = p.dwProcessId;
            context->handle = p.hProcess;
            /* CloseHandle (p.hProcess);*/
            CloseHandle (p.hThread);

            if ((options & EXEC_CALL_NO_IO) == 0)
            {
                io_close (proc_stdout_in);
                io_close (proc_stdin_out);

                context->in  = proc_stdin_in;
                context->out = proc_stdout_out;
            }
        }
        else
        {
            if ((options & EXEC_CALL_NO_IO) == 0)
            {
                io_close (proc_stdout_in);
                io_close (proc_stdout_out);
                io_close (proc_stdin_in);
                io_close (proc_stdin_out);
            }

            context->in  = (struct io *)0;
            context->out = (struct io *)0;
            context->status = ps_terminated;
            context->handle = (void *)0;
            context->pid = -1;
        }

        afree (argvsize, argv);
        afree (nqsize, nq);

        if (environment != (char **)0)
        {
            afree (envvsize, envx);
        }
    }

    return context;
}
flatmemory() : memory(aalloc(MEMSIZE, FLAT_MEM_ALIGN))
{
    if (memory == NULL)
        fatalError("Failed to allocate memory\n");
}
NEWPREDcnt* CNewPredEncoder::initNEWPREDcnt( UInt uiVO_id)
{
    int i, j;
    NEWPREDcnt* newpredCnt;

    newpredCnt = (NEWPREDcnt *) malloc(sizeof (NEWPREDcnt));
    memset(newpredCnt, 0, sizeof(NEWPREDcnt));

    m_iNumSlice = 1;
    for( i = 0; i < NP_MAX_NUMSLICE; i++ ) {
        if( m_piSlicePoint[i] < 0 )
            break;
        m_iNumSlice++;
    }
    --m_iNumSlice;

    newpredCnt->NPRefBuf =
        (NEWPRED_buf***)aalloc(m_iNumSlice, m_iNumBuffEnc, sizeof(NEWPRED_buf*));
    if (newpredCnt->NPRefBuf == NULL) {
        fprintf(stderr, "initNEWPREDcnt: ERROR Memory allocate error(NEWPRED_buf)\n");
        return newpredCnt;
    }

    newpredCnt->ref = new int[m_iNumSlice];
    if (newpredCnt->ref == NULL) {
        fprintf(stderr, "initNEWPREDcnt: ERROR Memory allocate error(ref)\n");
        return newpredCnt;
    }
    memset(newpredCnt->ref, 0, sizeof(int)*m_iNumSlice);

    Int *iY = new int[m_iNumSlice];
    m_iHMBNum = new int[m_iNumSlice];
    for (i = 0; (i < m_iNumSlice); i++) {
        if (i+1 < m_iNumSlice) {
            iY[i] = *(m_piSlicePoint + i+1) - *(m_piSlicePoint + i);
        } else {
            iY[i] = (m_iNPNumMBX*m_iNPNumMBY) - *(m_piSlicePoint + i);
        }
        m_iHMBNum[i] = iY[i] / m_iNPNumMBX;
        if (m_iHMBNum[i] == 0)
            m_iHMBNum[i] = 1;
    }
    delete []iY;

    for (i = 0; (i < m_iNumSlice) && (*(m_piSlicePoint + i) >= 0); i++) {
        Int iWidthUV  = m_iWidth/(MB_SIZE/BLOCK_SIZE);
        Int iHeightUV = (MB_SIZE * m_iHMBNum[i])/(MB_SIZE/BLOCK_SIZE);

        for (j = 0; j < m_iNumBuffEnc; j++) {
            newpredCnt->NPRefBuf[i][j] = new NEWPRED_buf;
            if (newpredCnt->NPRefBuf[i][j] == NULL) {
                fprintf(stderr, "initNEWPREDcnt: ERROR Memory allocate error(NEWPRED_buf)\n");
                return newpredCnt;
            }
            newpredCnt->NPRefBuf[i][j]->vop_id = 0;
            newpredCnt->NPRefBuf[i][j]->iSizeY =
                (2*EXPANDY_REF_FRAME+m_iWidth)*(MB_SIZE* m_iHMBNum[i]);
            newpredCnt->NPRefBuf[i][j]->iSizeUV =
                (EXPANDY_REF_FRAME+iWidthUV)* iHeightUV;
            newpredCnt->NPRefBuf[i][j]->iSlice = i;

            newpredCnt->NPRefBuf[i][j]->pdata.pchY =
                new PixelC[newpredCnt->NPRefBuf[i][j]->iSizeY];
            if (newpredCnt->NPRefBuf[i][j]->pdata.pchY == NULL) {
                fprintf(stderr, "initNEWPREDcnt: ERROR Memory allocate error(pchY)\n");
                return newpredCnt;
            }
            newpredCnt->NPRefBuf[i][j]->pdata.pchU =
                new PixelC[newpredCnt->NPRefBuf[i][j]->iSizeUV];
            if (newpredCnt->NPRefBuf[i][j]->pdata.pchU == NULL) {
                fprintf(stderr, "initNEWPREDcnt: ERROR Memory allocate error(pchU)\n");
                return newpredCnt;
            }
            newpredCnt->NPRefBuf[i][j]->pdata.pchV =
                new PixelC[newpredCnt->NPRefBuf[i][j]->iSizeUV];
            if (newpredCnt->NPRefBuf[i][j]->pdata.pchV == NULL) {
                fprintf(stderr, "initNEWPREDcnt: ERROR Memory allocate error(pchV)\n");
                return newpredCnt;
            }

            memset(newpredCnt->NPRefBuf[i][j]->pdata.pchY, 0,
                   newpredCnt->NPRefBuf[i][j]->iSizeY);
            memset(newpredCnt->NPRefBuf[i][j]->pdata.pchU, 0,
                   newpredCnt->NPRefBuf[i][j]->iSizeUV);
            memset(newpredCnt->NPRefBuf[i][j]->pdata.pchV, 0,
                   newpredCnt->NPRefBuf[i][j]->iSizeUV);
        }
    }

    return(newpredCnt);
}
void *MPID_nem_ib_com_reg_mr_fetch(void *addr, long len,
                                   enum ibv_access_flags additional_flags, int mode)
{
#if 0   /* debug */
    struct ibv_mr *mr;
    int ibcom_errno = MPID_nem_ib_com_reg_mr(addr, len, &mr);
    printf("mrcache,MPID_nem_ib_com_reg_mr,error,addr=%p,len=%d,lkey=%08x,rkey=%08x\n",
           addr, len, mr->lkey, mr->rkey);
    if (ibcom_errno != 0) {
        goto fn_fail;
    }
  fn_exit:
    return mr;
  fn_fail:
    goto fn_exit;
#else
    int ibcom_errno;
    int key;
    struct MPID_nem_ib_com_reg_mr_cache_entry_t *e;
    static unsigned long long num_global_cache = 0ULL;

#if 1   /*def HAVE_LIBDCFA */
    /* we can't change addr because ibv_post_send assumes mr->host_addr (output of this function)
     * must have an exact mirror value of addr (input of this function) */
    void *addr_aligned = addr;
    long len_aligned = len;
#else
    void *addr_aligned = (void *) ((unsigned long) addr & ~(MPID_NEM_IB_COM_REG_MR_SZPAGE - 1));
    long len_aligned = ((((unsigned long) addr + len) - (unsigned long) addr_aligned
                         + MPID_NEM_IB_COM_REG_MR_SZPAGE - 1)
                        & ~(MPID_NEM_IB_COM_REG_MR_SZPAGE - 1));
#endif
    key = MPID_nem_ib_com_hash_func(addr);

    dprintf("[MrCache] addr=%p, len=%ld\n", addr, len);
    dprintf("[MrCache] aligned addr=%p, len=%ld\n", addr_aligned, len_aligned);

    //__lru_queue_display();
    int way = 0;
    for (e = (struct MPID_nem_ib_com_reg_mr_cache_entry_t *) MPID_nem_ib_com_reg_mr_cache[key].lru_next;
         e != (struct MPID_nem_ib_com_reg_mr_cache_entry_t *) &MPID_nem_ib_com_reg_mr_cache[key];
         e = (struct MPID_nem_ib_com_reg_mr_cache_entry_t *) e->lru_next, way++) {
        //dprintf("e=%p, e->hash_next=%p\n", e, e->lru_next);

        if (e->addr <= addr_aligned &&
            (uint8_t *) addr_aligned + len_aligned <= (uint8_t *) e->addr + e->len) {
            //dprintf
            //("MPID_nem_ib_com_reg_mr_fetch,hit,entry addr=%p,len=%d,mr addr=%p,len=%ld,requested addr=%p,len=%d\n",
            //e->addr, e->len, e->mr->addr, e->mr->length, addr, len);
            goto hit;
        }
    }

    // miss

#if 0
    // evict an entry and de-register its MR when the cache-set is full
    if (way > MPID_NEM_IB_COM_REG_MR_NWAY) {
        struct MPID_nem_ib_com_reg_mr_cache_entry_t *victim =
            (struct MPID_nem_ib_com_reg_mr_cache_entry_t *) e->lru_prev;
        MPID_nem_ib_com_reg_mr_unlink((struct MPID_nem_ib_com_reg_mr_listnode_t *) victim);
        //dprintf("MPID_nem_ib_com_reg_mr,evict,entry addr=%p,len=%d,mr addr=%p,len=%ld\n", e->addr, e->len,
        //e->mr->addr, e->mr->length);
        ibcom_errno = MPID_nem_ib_com_dereg_mr(victim->mr);
        if (ibcom_errno) {
            printf("mrcache,MPID_nem_ib_com_dereg_mr\n");
            goto fn_fail;
        }
        afree(victim, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
    }
#endif
    e = aalloc(sizeof(struct MPID_nem_ib_com_reg_mr_cache_entry_t),
               MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
    /* reference counter is used when evicting entry */
    e->refc = 1;

    dprintf("MPID_nem_ib_com_reg_mr_fetch,miss,addr=%p,len=%ld\n", addr_aligned, len_aligned);
    /* register memory */
    ibcom_errno = MPID_nem_ib_com_reg_mr(addr_aligned, len_aligned, &e->mr, additional_flags);
    if (ibcom_errno != 0) {
        /* ib_com_reg_mr returns the errno of ibv_reg_mr */
        if (ibcom_errno == ENOMEM) {
#if 1
            /* deregister memory region unused and re-register new one */
            struct MPID_nem_ib_com_reg_mr_listnode_t *ptr;
            struct MPID_nem_ib_com_reg_mr_cache_entry_t *victim;

            unsigned long long dereg_total = 0;
            int reg_success = 0;
            for (ptr = MPID_nem_ib_com_reg_mr_global_cache.lru_prev;
                 ptr != (struct MPID_nem_ib_com_reg_mr_listnode_t *) &MPID_nem_ib_com_reg_mr_global_cache;) {
                victim = list_entry(ptr, struct MPID_nem_ib_com_reg_mr_cache_entry_t, g_lru);
                ptr = ptr->lru_prev;

                /* 'refc == 0' means this cache_entry is not used */
                if (victim && victim->addr && (victim->refc == 0)) {
                    MPID_nem_ib_com_reg_mr_unlink((struct MPID_nem_ib_com_reg_mr_listnode_t *) victim);
                    MPID_nem_ib_com_reg_mr_unlink(&(victim->g_lru));

                    ibcom_errno = MPID_nem_ib_com_dereg_mr(victim->mr);
                    if (ibcom_errno) {
                        printf("mrcache,MPID_nem_ib_com_dereg_mr\n");
                        afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
                        goto fn_fail;
                    }
                    dereg_total += (unsigned long long) victim->len;
                    afree(victim, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
                    num_global_cache--;

                    /* end loop if the total length released exceeds the requested */
                    if (dereg_total > len_aligned) {
                        dprintf("ib_com_reg_mr_fetch,dereg=%llu,len=%ld\n", dereg_total, len_aligned);
                        /* re-registraion */
                        ibcom_errno =
                            MPID_nem_ib_com_reg_mr(addr_aligned, len_aligned, &e->mr, additional_flags);
                        if (ibcom_errno == 0) {
                            /* ibv_reg_mr success */
                            reg_success = 1;
                            break;
                        }
                    }
                }
            }

            if (reg_success == 0) {
                fprintf(stderr, "mrcache,MPID_nem_ib_com_reg_mr,failed\n");
                afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
                goto fn_fail;
            }
#else
            /* deregister memory region. The value of 'num_global_cache' means the number of global-cached.
             * delete 5 percents of global-cached */
            int i;
            int del_num = (num_global_cache + 19) / 20;
            struct MPID_nem_ib_com_reg_mr_cache_entry_t *victim;

            dprintf("mrcache,MPID_nem_ib_com_reg_mr,ENOMEM,del_num(%d)\n", del_num);
            for (i = 0; i < del_num; i++) {
                /* get LRU data from MPID_nem_ib_com_reg_mr_global_cache */
                victim = list_entry(MPID_nem_ib_com_reg_mr_global_cache.lru_prev,
                                    struct MPID_nem_ib_com_reg_mr_cache_entry_t, g_lru);

                MPID_nem_ib_com_reg_mr_unlink((struct MPID_nem_ib_com_reg_mr_listnode_t *) victim);
                MPID_nem_ib_com_reg_mr_unlink(&(victim->g_lru));

                ibcom_errno = MPID_nem_ib_com_dereg_mr(victim->mr);
                if (ibcom_errno) {
                    printf("mrcache,MPID_nem_ib_com_dereg_mr\n");
                    afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
                    goto fn_fail;
                }
                afree(victim, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
                num_global_cache--;
            }
            /* re-registraion */
            ibcom_errno = MPID_nem_ib_com_reg_mr(addr_aligned, len_aligned, &e->mr, additional_flags);
            if (ibcom_errno != 0) {
                fprintf(stderr, "mrcache,MPID_nem_ib_com_reg_mr,retry,errno=%d\n", ibcom_errno);
                afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
                goto fn_fail;
            }
#endif
        }
        else {
            /* errno is not ENOMEM */
            fprintf(stderr, "mrcache,MPID_nem_ib_com_reg_mr,errno=%d\n", ibcom_errno);
            afree(e, MPID_NEM_IB_COM_AALLOC_ID_MRCACHE);
            goto fn_fail;
        }
    }
static sexpr include (sexpr arguments, struct machine_state *st)
{
    sexpr env = st->environment;
    define_string (str_slash, "/");
    sexpr e = env, to = car (arguments),
          t = sx_join (webroot, str_slash, to),
          data = sx_end_of_list, lang, lcodes, te, tf,
          type = sx_nonexistent, lcc;
    struct sexpr_io *io;
    int len = 0, i = 0;
    const char *ts = sx_string (to);
    char *tmp;

    if (nexp (lx_environment_lookup (e, sym_original_name)))
    {
        e = lx_environment_bind (e, sym_original_name, car (arguments));
    }

    if (truep (filep (t)))
    {
        return cons (e, cons (cons (sym_verbatim, cons (t, sx_end_of_list)),
                              sx_end_of_list));
    }

    te = sx_join (to, str_dot_ksu, str_nil);

    lcodes = (lx_environment_lookup (e, sym_language));
    lcodes = sx_reverse (lcodes);
    lcodes = cons (str_dot, lcodes);
    lcodes = sx_reverse (lcodes);

    lcc = lcodes;

    while (consp (lcc))
    {
        lang = car (lcc);
        t = sx_join (webroot, str_slash, sx_join (lang, str_slash, te));

        if (truep (filep (t)))
        {
            sexpr v = lx_environment_lookup (e, sym_Vary);
            tf = lx_environment_lookup (e, sym_accept);
            e = lx_environment_bind (e, sym_base_name, to);

            if (!nexp (v))
            {
                e = lx_environment_unbind (e, sym_Vary);
                e = lx_environment_bind (e, sym_Vary,
                        sx_join (v, str_cAccept, sx_end_of_list));
            }
            else
            {
                e = lx_environment_bind (e, sym_Vary, str_Accept);
            }

            if (!nexp (tf))
            {
                tf = get_acceptable_type (tf);
            }
            else
            {
                tf = default_type;
            }

            goto include;
        }

        lcc = cdr (lcc);
    }

    while (ts[len] != (char)0)
    {
        if (ts[len] == '.') i = len;
        len++;
    }

    if (i > 0)
    {
        len = i;
        tmp = aalloc (len + 1);

        for (i = 0; i < len; i++)
        {
            tmp[i] = ts[i];
        }
        tmp[i] = 0;
        i++;

        te   = make_string (tmp);
        type = make_string (ts + i);

        afree (i, tmp);

        e = lx_environment_bind (e, sym_base_name, te);
        e = lx_environment_bind (e, sym_extension, type);

        te = sx_join (te, str_dot_ksu, str_nil);

        while (consp (lcodes))
        {
            lang = car (lcodes);
            t = sx_join (webroot, str_slash, sx_join (lang, str_slash, te));

            if (truep (filep (t)))
            {
                tf = lx_environment_lookup (mime_map, type);

              include:
                if (!nexp (tf))
                {
                    struct transdata td =
                        { lx_environment_join
                              (lx_environment_join (kho_environment, env), e),
                          &data, 0 };

                    e = lx_environment_bind (e, sym_format, tf);

                    io = sx_open_i (io_open_read (sx_string (t)));

                    multiplex_add_sexpr (io, include_on_read, &td);

                    do
                    {
                        multiplex ();
                    }
                    while (td.done == 0);

                    return cons (e, sx_reverse (data));
                }
                else
                {
                    return cons (e, cons (cons (sym_object,
                                     cons (sym_verbatim, cons (t, sx_end_of_list))),
                                 sx_end_of_list));
                }
            }

            lcodes = cdr (lcodes);
        }
    }

    if (!nexp (lx_environment_lookup (e, sym_error)))
    {
        return cons (sym_object, cons (cons (sym_error,
                         cons (sym_file_not_found, sx_end_of_list)),
                     sx_end_of_list));
    }
    else
    {
        return sx_nonexistent;
    }
}
/*===========================================================================*
 *                                pt_init                                    *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
    pt_t *newpt;
    int s, r;
    vir_bytes v, kpagedir;
    phys_bytes lo, hi;
    vir_bytes extra_clicks;
    u32_t moveup = 0;
    int global_bit_ok = 0;
    int free_pde;
    int p;
    vir_bytes kernlimit;
    vir_bytes sparepages_mem;
    phys_bytes sparepages_ph;

    /* Shorthand. */
    newpt = &vmp->vm_pt;

    /* Get ourselves spare pages. */
    if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
        vm_panic("pt_init: aalloc for spare failed", NO_NUM);
    if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
        I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
        vm_panic("pt_init: sys_umap failed", r);

    for(s = 0; s < SPAREPAGES; s++) {
        sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
    }

    missing_spares = 0;

    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;

    /* The kernel and boot time processes need an identity mapping.
     * We use full PDE's for this without separate page tables.
     * Figure out which pde we can start using for other purposes.
     */
    id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

    /* We have to make mappings up till here. */
    free_pde = id_map_high_pde+1;

    /* Initial (current) range of our virtual address space. */
    lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
    hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
        vmp->vm_arch.vm_seg[S].mem_len);

    vm_assert(!(lo % I386_PAGE_SIZE));
    vm_assert(!(hi % I386_PAGE_SIZE));

    if(lo < VM_PROCSTART) {
        moveup = VM_PROCSTART - lo;
        vm_assert(!(VM_PROCSTART % I386_PAGE_SIZE));
        vm_assert(!(lo % I386_PAGE_SIZE));
        vm_assert(!(moveup % I386_PAGE_SIZE));
    }

    /* Make new page table for ourselves, partly copied
     * from the current one.
     */
    if(pt_new(newpt) != OK)
        vm_panic("pt_init: pt_new failed", NO_NUM);

    /* Set up mappings for VM process. */
    for(v = lo; v < hi; v += I386_PAGE_SIZE) {
        phys_bytes addr;
        u32_t flags;

        /* We have to write the new position in the PT,
         * so we can move our segments.
         */
        if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
            I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
            vm_panic("pt_init: pt_writemap failed", NO_NUM);
    }

    /* Move segments up too. */
    vmp->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
    vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
    vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);

    /* Allocate us a page table in which to remember page directory
     * pointers.
     */
    if(!(page_directories = vm_allocpage(&page_directories_phys,
        VMP_PAGETABLE)))
        vm_panic("no virt addr for vm mappings", NO_NUM);

    memset(page_directories, 0, I386_PAGE_SIZE);

    /* Increase our hardware data segment to create virtual address
     * space above our stack. We want to increase it to VM_DATATOP,
     * like regular processes have.
     */
    extra_clicks = ABS2CLICK(VM_DATATOP - hi);
    vmp->vm_arch.vm_seg[S].mem_len += extra_clicks;

    /* We pretend to the kernel we have a huge stack segment to
     * increase our data segment.
     */
    vmp->vm_arch.vm_data_top =
        (vmp->vm_arch.vm_seg[S].mem_vir +
        vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

    /* Where our free virtual address space starts.
     * This is only a hint to the VM system.
     */
    newpt->pt_virtop = 0;

    /* Let other functions know VM now has a private page table. */
    vmp->vm_flags |= VMF_HASPT;

    /* Now reserve another pde for kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = free_pde++;
        offset = kernmap_pde * I386_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len, &flags) == OK) {
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                vm_panic("VM: too many kernel mappings", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].flags = flags;
            kern_mappings[index].lin_addr = offset;
            kern_mappings[index].flags =
                I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
                global_bit;
            if(flags & VMMF_UNCACHED)
                kern_mappings[index].flags |= I386_VM_PWT | I386_VM_PCD;
            if(addr % I386_PAGE_SIZE)
                vm_panic("VM: addr unaligned", addr);
            if(len % I386_PAGE_SIZE)
                vm_panic("VM: len unaligned", len);
            vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                vm_panic("VM: reply failed", NO_NUM);
            offset += len;
            index++;
            kernmappings++;
        }
    }

    /* Find a PDE below processes available for mapping in the
     * page directories (readonly).
     */
    pagedir_pde = free_pde++;
    pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
        I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

    /* Tell kernel about free pde's. */
    while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
        if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
            vm_panic("VMCTL_I386_FREEPDE failed", r);
        }
    }

    /* first pde in use by process. */
    proc_pde = free_pde;

    kernlimit = free_pde*I386_BIG_PAGE_SIZE;

    /* Increase kernel segment to address this memory. */
    if((r=sys_vmctl(SELF, VMCTL_I386_KERNELLIMIT, kernlimit)) != OK) {
        vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
    }

    kpagedir = arch_map2vir(&vmproc[VMP_SYSTEM],
        pagedir_pde*I386_BIG_PAGE_SIZE);

    /* Tell kernel how to get at the page directories. */
    if((r=sys_vmctl(SELF, VMCTL_I386_PAGEDIRS, kpagedir)) != OK) {
        vm_panic("VMCTL_I386_PAGEDIRS failed", r);
    }

    /* Give our process the new, copied, private page table. */
    pt_mapkernel(newpt);    /* didn't know about vm_dir pages earlier */
    pt_bind(newpt, vmp);

    /* Now actually enable paging. */
    if(sys_vmctl_enable_paging(vmp->vm_arch.vm_seg) != OK)
        vm_panic("pt_init: enable paging failed", NO_NUM);

    /* Back to reality - this is where the stack actually is. */
    vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;

    /* All OK. */
    return;
}
static sexpr pong (sexpr arguments, struct machine_state *st)
{
    sexpr e = car (arguments), r, tf, v, ex, bn, on;

    if (!environmentp (e))
    {
        e = lx_make_environment (sx_end_of_list);
        r = arguments;
    }
    else
    {
        r = cdr (arguments);
    }

    on = lx_environment_lookup (e, sym_original_name);
    ex = lx_environment_lookup (e, sym_extension);
    bn = lx_environment_lookup (e, sym_base_name);

    if (!nexp (on) && (nexp (ex) || nexp (bn)))
    {
        const char *ts = sx_string (on);
        char *tmp;
        int len = 0, i = 0;

        while (ts[len] != (char)0)
        {
            if (ts[len] == '.') i = len;
            len++;
        }

        if (i > 0)
        {
            len = i;
            tmp = aalloc (len + 1);

            for (i = 0; i < len; i++)
            {
                tmp[i] = ts[i];
            }
            tmp[i] = 0;
            i++;

            bn = make_string (tmp);
            ex = make_string (ts + i);

            afree (i, tmp);

            if (!nexp (bn))
            {
                lx_environment_unbind (e, sym_base_name);
            }

            if (!nexp (ex))
            {
                lx_environment_unbind (e, sym_extension);
            }

            e = lx_environment_bind (e, sym_base_name, bn);
            e = lx_environment_bind (e, sym_extension, ex);
        }
        else
        {
            e = lx_environment_bind (e, sym_base_name, on);
        }
    }

    tf = lx_environment_lookup (e, sym_format);

    if (nexp (tf))
    {
        tf = lx_environment_lookup (mime_map, ex);

        if (nexp (tf))
        {
            tf = lx_environment_lookup (e, sym_accept);

            if (!nexp (tf))
            {
                tf = get_acceptable_type (tf);
            }
            else
            {
                tf = default_type;
            }

            v = lx_environment_lookup (e, sym_Vary);

            if (!nexp (v))
            {
                e = lx_environment_unbind (e, sym_Vary);
                e = lx_environment_bind (e, sym_Vary,
                        sx_join (v, str_cAccept, sx_end_of_list));
            }
            else
            {
                e = lx_environment_bind (e, sym_Vary, str_Accept);
            }
        }

        e = lx_environment_bind (e, sym_format, tf);
    }

    return sx_list2 (e, r);
}
int bullet_init()
{
    vir_bytes size;
    peer_bits test_var;

    /* Initialize to logging */
    bs_log_msgs = 1;
#ifndef USER_LEVEL
    bs_print_msgs = 0;
#endif
    bs_debug_level = DEBUG_LEVEL;

    /* Check that inode size is sensible - otherwise the compiler is odd */
    if (sizeof (Inode) != INODE_SIZE)
    {
        bwarn("Inode size is %d, should be %d\nServer is inactive!",
              sizeof(Inode), INODE_SIZE);
        return 0;
    }

    /* Check that the peer bitmap can contain all members */
    test_var = 1;
    test_var <<= (S_MAXMEMBER-1);
    if ( test_var == 0 || test_var != (1<<(S_MAXMEMBER-1)) )
    {
        bwarn("type \"peer_bits\" is too small\nServer is inactive!");
        return 0;
    }

    /* Figure out which vdisk to use and read the superblock */
    if (get_disk_server_cap() == 0)
        return 0;

    /* Now that we know there is a disk we can announce ourselves */
    message("BULLET SERVER INITIALIZATION");

    /*
     * Set up pointers to beginning and end of buffer cache.
     * Make sure that our buffer cache is block aligned!  The inode table
     * goes at the start of the cache memory and relies on blocksize
     * alignment.
     * NB: We must leave some memory free for other kernel threads and
     * possible user processes (such as soap).  It leaves
     * BS_MEM_RESERVE bytes if no fixed cache size is defined.
     */
#ifdef BULLET_MEMSIZE
    size = BULLET_MEMSIZE;
#else
    size = seg_maxfree();
    if (size < BS_MEM_RESERVE)
    {
        bpanic("bullet_init: memory reserve (0x%x) exceeds free memory (0x%x)\n",
               BS_MEM_RESERVE, size);
    }
    size -= BS_MEM_RESERVE;
#endif /* BULLET_MEMSIZE */

    /*
     * The following allocate may take a long time, especially when lots
     * of memory (32Mb :-) is involved.  However, since kernel threads are
     * not preempted the enqueued interrupt routines never get a chance to
     * run, and eventually the interrupt queue will overflow causing a
     * panic.
     */
#ifndef USER_LEVEL
    disable();
#endif /* USER_LEVEL */
    Bc_begin = (bufptr) aalloc(size, (int) Blksizemin1 + 1);
#ifndef USER_LEVEL
    enable();
#endif /* USER_LEVEL */
    Bc_end = Bc_begin + size;
    if (Bc_begin >= Bc_end)
        bpanic("bullet_init: no buffer cache");

    /* Initialise resource allocation.  NB: cache_mem is initially free! */
    a_init(BYTES_TO_MEG(Bc_end - Bc_begin) +
           BLOCKS_TO_MEG(Superblock.s_numblocks, Blksizemin1 + 1));
    if (a_begin(A_CACHE_MEM, (Res_addr) Bc_begin, (Res_addr) Bc_end) < 0)
        bpanic("Cannot begin resource allocator for buffer cache.\n");

    /* Initialise buffer cache management */
    cache_init();

    Largest_file_size = (b_fsize) a_notused(A_CACHE_MEM);

    /* Print some cheery statistics about the current state of the server */
    message("Buffer cache size = 0x%lx bytes", Bc_end - Bc_begin);
    message("Largest possible file size = 0x%lx bytes", Largest_file_size);

    /* Calculate the size of the super struct in local blocks.
     * It is the number of disk blocks that fit into a D_PHYSBLK.
     */
    if ( D_PHYS_SHIFT <= Superblock.s_blksize )
    {
        /* Super info takes one block or less */
        bs_super_size = 1;
    }
    else
    {
        /* To be honest, bs_super_size will always be one, because
         * D_PHYS_SHIFT seems to be the minimum.
         */
        bwarn("Super block takes more than one block");
        bs_super_size = 1 << (D_PHYS_SHIFT - Superblock.s_blksize);
    }

    /* Super Block Lock */
    mu_init(&SuperLock);

    /* Init group structures */
    bs_grp_init();

    return 1;
}