struct psc_memnode * psc_memnode_get(void) { struct psc_memnode *pmn, **pmnv; int memnid, rc; pmn = pthread_getspecific(psc_memnodes_key); if (pmn) return (pmn); memnid = psc_memnode_getid(); spinlock(&psc_memnodes_lock); if (psc_dynarray_ensurelen(&psc_memnodes, memnid + 1) == -1) psc_fatalx("ensurelen"); pmnv = psc_dynarray_get_mutable(&psc_memnodes); pmn = pmnv[memnid]; if (pmn == NULL) { pmn = psc_alloc(sizeof(*pmn), PAF_NOLOG); INIT_SPINLOCK(&pmn->pmn_lock); psc_dynarray_init(&pmn->pmn_keys); rc = pthread_setspecific(psc_memnodes_key, pmn); if (rc) psc_fatalx("pthread_setspecific: %s", strerror(rc)); psc_dynarray_setpos(&psc_memnodes, memnid, pmn); } freelock(&psc_memnodes_lock); return (pmn); }
/*
 * Attach a new endpoint to a USB device: build an EndpointHandle
 * wrapping a freshly initialized Endpoint, give it a worker process
 * on the device's workqueue, and link it onto the device's endpoint
 * handle list.
 */
void
add_endpoint(UsbDevice *udev, EndpointDesc *endpoint_desc)
{
	EndpointHandle *handle;
	Endpoint *ep;
	List *hook;

	handle = ALLOC(sizeof(EndpointHandle));
	handle->process = create_process(udev->workqueue);

	/* Endpoint state: descriptor, default timeout, empty transfer lists. */
	ep = ALLOC(sizeof(Endpoint));
	ep->p_endpoint_desc = endpoint_desc;
	ep->timeout = STD_TIMEOUT;
	INIT_SPINLOCK(&ep->lock);
	ep->complete_transfers = NULL;
	ep->pending_transfers = NULL;
	handle->endpoint = ep;

	/* No completion callback registered yet. */
	handle->callback = NULL;
	handle->context = NULL;

	/* Hook the handle onto the device's endpoint list. */
	hook = ALLOC(sizeof(List));
	hook->data = handle;
	pushBack(&udev->endpoints_handles, hook);
}
/*
 * Initialize a completion structure: zero all fields, then set up
 * its spinlock and the wait queue that waiters block on.
 */
void
psc_compl_init(struct psc_compl *pc)
{
	memset(pc, 0, sizeof(*pc));
	INIT_SPINLOCK(&pc->pc_lock);
	pfl_waitq_init(&pc->pc_wq, "completion");
}
/*
 * Initialize the global MDS bmap timeout table: set up its spinlock
 * and the locked list of bmap leases, keyed by the bml_timeo_lentry
 * member of struct bmap_mds_lease and guarded by btt_lock.
 */
void
mds_bmap_timeotbl_init(void)
{
	INIT_SPINLOCK(&mdsBmapTimeoTbl.btt_lock);
	pll_init(&mdsBmapTimeoTbl.btt_leases, struct bmap_mds_lease,
	    bml_timeo_lentry, &mdsBmapTimeoTbl.btt_lock);
}
/*
 * Load an existing on-disk table (ODT).
 * @tp: value-result pointer receiving the new in-memory table handle.
 * @odtops: backend operations (open/read/write callbacks).
 * @oflg: open flags passed through to the backend open routine.
 * @fn: path of the backing file.
 * @fmt: printf-style format (plus varargs) naming the table.
 *
 * The header is read by the backend open callback, then verified
 * against its stored CRC; a mismatch is fatal (assertion).
 */
void
pfl_odt_load(struct pfl_odt **tp, struct pfl_odt_ops *odtops, int oflg,
    const char *fn, const char *fmt, ...)
{
	struct pfl_odt_hdr *h;
	struct pfl_odt *t;
	uint64_t crc;
	va_list ap;

	*tp = t = PSCALLOC(sizeof(*t));
	t->odt_ops = *odtops;
	INIT_SPINLOCK(&t->odt_lock);
	INIT_PSC_LISTENTRY(&t->odt_lentry);

	/* Format the table name used for logging and opstat labels. */
	va_start(ap, fmt);
	vsnprintf(t->odt_name, sizeof(t->odt_name), fmt, ap);
	va_end(ap);

	t->odt_iostats.rd = pfl_opstat_init("odt-%s-rd", t->odt_name);
	t->odt_iostats.wr = pfl_opstat_init("odt-%s-wr", t->odt_name);

	h = t->odt_hdr = PSCALLOC(sizeof(*h));

	/* pfl_odt_open() and slm_odt_open(); fills in the header. */
	odtops->odtop_open(t, fn, oflg);

	/* Verify header integrity: CRC covers all fields except the CRC. */
	psc_crc64_calc(&crc, t->odt_hdr,
	    sizeof(*t->odt_hdr) - sizeof(t->odt_hdr->odth_crc));
	pfl_assert(h->odth_crc == crc);

	t->odt_bitmap = psc_vbitmap_newf(h->odth_nitems, PVBF_AUTO);
	pfl_assert(t->odt_bitmap);

	/*
	 * Skip the first slot, so that we can detect whether we have
	 * assigned a lease easily.
	 */
	psc_vbitmap_set(t->odt_bitmap, 0);

	PFLOG_ODT(PLL_DIAG, t, "loaded");

	pll_add(&pfl_odtables, t);
}
/*
 * Initialize a locked list.
 * @pll: list to initialize.
 * @offset: byte offset of the list entry member within items.
 * @lkp: optional external lock; if given, the list does not own one.
 * @flags: PLLF_* behavior flags (lock logging selection, etc.).
 */
void
_pll_initf(struct psc_lockedlist *pll, int offset, psc_spinlock_t *lkp,
    int flags)
{
	memset(pll, 0, sizeof(*pll));
	INIT_PSCLIST_HEAD(&pll->pll_listhd);
	pll->pll_flags = flags;
	pll->pll_offset = offset;

	/* Caller-supplied lock: record it and skip internal lock setup. */
	if (lkp != NULL) {
		pll->pll_flags |= PLLF_EXTLOCK;
		pll->pll_lockp = lkp;
		return;
	}

	/* Internal lock: pick the variant matching the logging flags. */
	if (flags & PLLF_LOGTMP)
		INIT_SPINLOCK_LOGTMP(&pll->pll_lock);
	else if (flags & PLLF_NOLOG)
		INIT_SPINLOCK_NOLOG(&pll->pll_lock);
	else
		INIT_SPINLOCK(&pll->pll_lock);
}
int pfl_odt_create(const char *fn, int64_t nitems, size_t itemsz, int overwrite, size_t startoff, size_t pad, int tflg) { int rc; int64_t item; struct pfl_odt_slotftr f; struct pfl_odt_hdr *h; struct pfl_odt *t; t = PSCALLOC(sizeof(*t)); t->odt_ops = pfl_odtops; INIT_SPINLOCK(&t->odt_lock); snprintf(t->odt_name, sizeof(t->odt_name), "%s", pfl_basename(fn)); t->odt_iostats.rd = pfl_opstat_init("odt-%s-rd", t->odt_name); t->odt_iostats.wr = pfl_opstat_init("odt-%s-wr", t->odt_name); h = PSCALLOC(sizeof(*h)); memset(h, 0, sizeof(*h)); h->odth_nitems = nitems; h->odth_itemsz = itemsz; h->odth_slotsz = itemsz + pad + sizeof(f); h->odth_options = tflg; h->odth_start = startoff; t->odt_hdr = h; psc_crc64_calc(&h->odth_crc, h, sizeof(*h) - sizeof(h->odth_crc)); /* pfl_odt_new() and slm_odt_new() */ rc = t->odt_ops.odtop_new(t, fn, overwrite); if (rc) return (rc); for (item = 0; item < nitems; item++) _pfl_odt_doput(t, item, NULL, &f, 0); PFLOG_ODT(PLL_DIAG, t, "created"); pfl_odt_release(t); return (0); }
/*
 * psc_fault_register - Register a new fault point.
 * @name: full fault identifier; expected format:
 *	<daemon>_FAULT_<fault-name>.
 * Returns the newly initialized fault entry from the global table.
 *
 * The fault must not already be registered, the name must contain
 * "_FAULT_", and the trailing fault-name must fit in pflt_name.
 */
struct psc_fault *
_psc_fault_register(const char *name)
{
	struct psc_fault *pflt;
	char *p;

	pflt = psc_fault_lookup(name);
	psc_assert(pflt == NULL);
	p = strstr(name, "_FAULT_");
	psc_assert(p);
	/* 7 == strlen("_FAULT_"); p + 7 is the bare fault name. */
	psc_assert(strlen(p + 7) < sizeof(pflt->pflt_name));

	pflt = &psc_faults[psc_nfaults++];
	INIT_SPINLOCK(&pflt->pflt_lock);
	strlcpy(pflt->pflt_name, p + 7, sizeof(pflt->pflt_name));
	/*
	 * Lowercase the stored name.  Cast through unsigned char: passing
	 * a negative plain-char value to tolower() is undefined behavior.
	 */
	for (p = pflt->pflt_name; *p; p++)
		*p = tolower((unsigned char)*p);
	/* Defaults: always fire (100% chance), unlimited count. */
	pflt->pflt_chance = 100;
	pflt->pflt_count = -1;
	return (pflt);
}
/*
 * Lookup and optionally create a new bmap structure.
 * @f: file's bmap tree to search.
 * @n: bmap index number to search for.
 * @bmaprw: access mode (BMAPF_RD or BMAPF_WR) stamped on a new bmap.
 * @new_bmap: whether to allow creation and also value-result of whether
 *	it was newly created or not.
 * Returns the referenced bmap (lookup reference taken), or NULL when
 * not found and creation was not requested.
 *
 * Locking: the fcmh rwlock is taken read-mode for pure lookups and
 * write-mode once we hold a pool item to insert; lock-order conflicts
 * with a held bmap are resolved by dropping everything and restarting.
 */
struct bmap *
bmap_lookup_cache(struct fidc_membh *f, sl_bmapno_t n, int bmaprw,
    int *new_bmap)
{
	struct bmap lb, *b, *bnew = NULL;
	int doalloc;

	doalloc = *new_bmap;
	lb.bcm_bmapno = n;

 restart:
	/* Write lock only when we may insert a preallocated bmap. */
	if (bnew)
		pfl_rwlock_wrlock(&f->fcmh_rwlock);
	else
		pfl_rwlock_rdlock(&f->fcmh_rwlock);
	b = RB_FIND(bmaptree, &f->fcmh_bmaptree, &lb);
	if (b) {
		/*
		 * Trylock avoids deadlock against threads that take the
		 * bmap lock before the fcmh rwlock; back off and retry.
		 */
		if (!BMAP_TRYLOCK(b)) {
			pfl_rwlock_unlock(&f->fcmh_rwlock);
			usleep(10);
			goto restart;
		}
		if (b->bcm_flags & BMAPF_TOFREE) {
			/*
			 * This bmap is going away; wait for it so we
			 * can reload it back.
			 */
			DEBUG_BMAP(PLL_DIAG, b, "wait on to-free bmap");
			BMAP_ULOCK(b);
			/*
			 * We don't want to spin if we are waiting for a
			 * flush to clear.
			 */
			psc_waitq_waitrelf_us(&f->fcmh_waitq,
			    PFL_LOCKPRIMT_RWLOCK, &f->fcmh_rwlock, 100);
			goto restart;
		}
		bmap_op_start_type(b, BMAP_OPCNT_LOOKUP);
	}
	if (doalloc == 0 || b) {
		/* Cache hit (or lookup-only miss): return what we found. */
		pfl_rwlock_unlock(&f->fcmh_rwlock);
		if (bnew)
			psc_pool_return(bmap_pool, bnew);
		*new_bmap = 0;
		OPSTAT_INCR("bmapcache.hit");
		return (b);
	}
	if (bnew == NULL) {
		/*
		 * Miss with creation allowed: drop the rwlock before the
		 * potentially-blocking pool get, then restart the lookup
		 * since another thread may insert meanwhile.
		 */
		pfl_rwlock_unlock(&f->fcmh_rwlock);
		if (sl_bmap_ops.bmo_reapf)
			sl_bmap_ops.bmo_reapf();
		bnew = psc_pool_get(bmap_pool);
		goto restart;
	}

	/* Still a miss while write-locked: initialize the new bmap. */
	b = bnew;
	OPSTAT_INCR("bmapcache.miss");
	*new_bmap = 1;
	memset(b, 0, bmap_pool->ppm_master->pms_entsize);
	INIT_PSC_LISTENTRY(&b->bcm_lentry);
	INIT_SPINLOCK(&b->bcm_lock);
	psc_atomic32_set(&b->bcm_opcnt, 0);
	b->bcm_fcmh = f;
	b->bcm_bmapno = n;

	/*
	 * Signify that the bmap is newly initialized and therefore may
	 * not contain certain structures.
	 */
	psc_assert(bmaprw == BMAPF_RD || bmaprw == BMAPF_WR);
	b->bcm_flags = bmaprw;

	bmap_op_start_type(b, BMAP_OPCNT_LOOKUP);

	/*
	 * Perform app-specific substructure initialization, which is
	 * msl_bmap_init(), iod_bmap_init(), or mds_bmap_init().
	 */
	sl_bmap_ops.bmo_init_privatef(b);

	/* Add to the fcmh's bmap cache */
	PSC_RB_XINSERT(bmaptree, &f->fcmh_bmaptree, b);

	pfl_rwlock_unlock(&f->fcmh_rwlock);

	fcmh_op_start_type(f, FCMH_OPCNT_BMAP);

	BMAP_LOCK(b);
	return (b);
}
* * Notes: No warranty expressed or implied. Use at own risk. */ #include "kernel/kernel.h" // Ring buffer to hold raw scan codes. Insert at head, remove at // tail, drop chars if buffer is full. static uint8 kbd_buffer[KBD_BUFFER_SIZE]; static int buf_head = 0; static int buf_tail = 0; // Current keyboard state (bit flags) //static uint16 kbd_flags = 0; // Synchronize access between ISR and higher level functions. // Used to guard access to ring-buffer and flags. static spinlock kbd_lock = INIT_SPINLOCK("kbd"); /* KBDUS means US Keyboard Layout. This is a scancode table * used to layout a standard US keyboard. I have left some * comments in to give you an idea of what key is what, even * though I set it's array index to 0. You can change that to * whatever you want using a macro, if you wish! */ unsigned char kbdus[128] = { 0, 27, '1', '2', '3', '4', '5', '6', '7', '8', /* 9 */ '9', '0', '-', '=', '\b', /* Backspace */ '\t', /* Tab */ 'q', 'w', 'e', 'r', /* 19 */ 't', 'y', 'u', 'i', 'o', 'p', '[', ']', '\n', /* Enter key */ 0, /* 29 - Control */ 'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', /* 39 */
#include <types.h> #include <errno.h> #include <console.h> #include <kernel.h> #include <macros.h> #include <mm/kmalloc.h> #include <mm/pages.h> #include <lib/string.h> #include <arch/spinlock.h> #include <arch/mm/config.h> static uint32_t used_pages = 0; static uint32_t alloc_size = 0; static spinlock_t kmalloc_lock = INIT_SPINLOCK("kmalloc"); static kmalloc_block_t* root = NULL; static inline int kmalloc_block_validate(kmalloc_block_t* block) { return (block->magic == KMALLOC_BLOCK_MAGIC); } static inline int kmalloc_chunk_validate(kmalloc_chunk_t* chunk) { return ((chunk->magic & 0xFFFFFFF0) == KMALLOC_CHUNK_MAGIC); } static inline void kmalloc_chunk_set_free(kmalloc_chunk_t* chunk, int free) { if (free) { chunk->magic |= CHUNK_FREE; } else { chunk->magic &= ~CHUNK_FREE;