/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
    hrtime_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	hrtime_t time_left, now;
	unsigned long time_left_us;
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		SRETURN(-1);
	}
	time_left_us = time_left / NSEC_PER_USEC;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);

	/*
	 * Allow a 100 us range to give the kernel an opportunity
	 * to coalesce interrupts.
	 */
	usleep_range(time_left_us, time_left_us + 100);
	mutex_enter(mp);

	/* Once there are no more waiters, a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	time_left = expire_time - gethrtime();
	SRETURN(time_left > 0 ? time_left : -1);
}
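/*
 * Hedged usage sketch (not from the SPL source): a caller would normally
 * reach __cv_timedwait_hires() through a public cv_timedwait_hires()
 * wrapper, passing an absolute deadline computed from gethrtime().  The
 * wrapper signature assumed here is cv_timedwait_hires(cvp, mp, tim, res,
 * flag) with CALLOUT_FLAG_ABSOLUTE; 'my_mtx', 'my_cv', and 'my_flag' are
 * hypothetical names.
 */
static int
wait_for_flag_example(kmutex_t *my_mtx, kcondvar_t *my_cv,
    volatile int *my_flag)
{
	/* Absolute deadline: 500 ms from now, in nanoseconds */
	hrtime_t deadline = gethrtime() + 500 * NSEC_PER_MSEC;

	mutex_enter(my_mtx);
	while (!*my_flag) {
		/* Returns -1 once the deadline has passed */
		if (cv_timedwait_hires(my_cv, my_mtx, deadline, 0,
		    CALLOUT_FLAG_ABSOLUTE) == -1)
			break;
	}
	mutex_exit(my_mtx);
	return (*my_flag ? 0 : -1);
}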
/*
 * thread_create() may block forever if it cannot create a thread or
 * allocate memory.  This is preferable to returning NULL, which Solaris
 * style callers likely never check for... since it can't fail.
 */
kthread_t *
__thread_create(caddr_t stk, size_t stksize, thread_func_t func,
    const char *name, void *args, size_t len, proc_t *pp,
    int state, pri_t pri)
{
	thread_priv_t *tp;
	struct task_struct *tsk;
	char *p;
	SENTRY;

	/* Option pp is simply ignored */
	/* Variable stack size unsupported */
	ASSERT(stk == NULL);

	tp = kmem_alloc(sizeof(thread_priv_t), KM_SLEEP);
	if (tp == NULL)
		SRETURN(NULL);

	tp->tp_magic = TP_MAGIC;
	tp->tp_name_size = strlen(name) + 1;

	tp->tp_name = kmem_alloc(tp->tp_name_size, KM_SLEEP);
	if (tp->tp_name == NULL) {
		kmem_free(tp, sizeof(thread_priv_t));
		SRETURN(NULL);
	}

	strncpy(tp->tp_name, name, tp->tp_name_size);

	/*
	 * Strip the trailing "_thread" from the passed name, which will be
	 * the function name since the exposed API has no parameter for
	 * passing a name.
	 */
	p = strstr(tp->tp_name, "_thread");
	if (p)
		p[0] = '\0';

	tp->tp_func  = func;
	tp->tp_args  = args;
	tp->tp_len   = len;
	tp->tp_state = state;
	tp->tp_pri   = pri;

	tsk = kthread_create(thread_generic_wrapper, (void *)tp,
	    "%s", tp->tp_name);
	if (IS_ERR(tsk)) {
		SERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
		SRETURN(NULL);
	}

	wake_up_process(tsk);
	SRETURN((kthread_t *)tsk);
}
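/*
 * Hedged usage sketch (not from the SPL source): spawning a named kernel
 * thread through the Solaris-style thread_create() entry point that lands
 * in __thread_create().  'example_worker_thread' is a hypothetical
 * function; note how its "_thread" suffix becomes the Linux task name
 * "example_worker" after the strstr() strip above.
 */
static void
example_worker_thread(void *arg)
{
	/* ... do work, then terminate this thread ... */
	thread_exit();
}

static void
example_spawn(void *arg)
{
	(void) thread_create(NULL, 0, example_worker_thread, arg,
	    0, &p0, TS_RUN, minclsyspri);
}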
int
zlib_init(void)
{
	int size;
	SENTRY;

	size = MAX(spl_zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
	    zlib_inflate_workspacesize());

	zlib_workspace_cache = kmem_cache_create("spl_zlib_workspace_cache",
	    size, 0, NULL, NULL, NULL, NULL, NULL, KMC_VMEM);
	if (!zlib_workspace_cache)
		SRETURN(1);

	SRETURN(0);
}
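/*
 * Hedged sketch (not necessarily how the surrounding file does it): once
 * the cache is sized for the larger of the deflate/inflate workspaces,
 * compression paths can draw workspaces from it rather than allocating
 * per call.  The 'zlib_workspace_alloc'/'zlib_workspace_free' helper
 * names are assumptions for illustration.
 */
static void *
zlib_workspace_alloc(int flags)
{
	return (kmem_cache_alloc(zlib_workspace_cache, flags));
}

static void
zlib_workspace_free(void *workspace)
{
	kmem_cache_free(zlib_workspace_cache, workspace);
}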
/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
    clock_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	clock_t time_left;
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		SRETURN(-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	/* Once there are no more waiters, a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	SRETURN(time_left > 0 ? time_left : -1);
}
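/*
 * Hedged usage sketch (not from the SPL source): the jiffies-based
 * variant is normally reached through cv_timedwait(), whose timeout is
 * an absolute lbolt value in the Solaris style.  'my_mtx', 'my_cv', and
 * 'my_done' are hypothetical names; HZ jiffies is assumed to equal one
 * second, as in the Linux kernel.
 */
static int
wait_one_second_example(kmutex_t *my_mtx, kcondvar_t *my_cv,
    volatile int *my_done)
{
	/* Absolute deadline in jiffies: one second from now */
	clock_t deadline = ddi_get_lbolt() + HZ;

	mutex_enter(my_mtx);
	while (!*my_done) {
		if (cv_timedwait(my_cv, my_mtx, deadline) == -1)
			break;	/* timed out */
	}
	mutex_exit(my_mtx);
	return (*my_done ? 0 : -1);
}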
int
spl_proc_init(void)
{
	int rc = 0;
	SENTRY;

#ifdef CONFIG_SYSCTL
	spl_header = spl_register_sysctl_table(spl_root, 0);
	if (spl_header == NULL)
		SRETURN(-EUNATCH);
#endif /* CONFIG_SYSCTL */

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL)
		SGOTO(out, rc = -EUNATCH);

#ifdef DEBUG_KMEM
	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL)
		SGOTO(out, rc = -EUNATCH);

	proc_spl_kmem_slab = create_proc_entry("slab", 0444, proc_spl_kmem);
	if (proc_spl_kmem_slab == NULL)
		SGOTO(out, rc = -EUNATCH);

	proc_spl_kmem_slab->proc_fops = &proc_slab_operations;
#endif /* DEBUG_KMEM */

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL)
		SGOTO(out, rc = -EUNATCH);

out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
#ifdef DEBUG_KMEM
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
#endif
		remove_proc_entry("spl", NULL);
#ifdef CONFIG_SYSCTL
		spl_unregister_sysctl_table(spl_header);
#endif /* CONFIG_SYSCTL */
	}

	SRETURN(rc);
}
static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;
	SENTRY;

	++*pos;
	SRETURN((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;
	SENTRY;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			SRETURN(NULL);
	}

	SRETURN(list_entry(p, spl_kmem_cache_t, skc_list));
}
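/*
 * Hedged sketch (not from the SPL source): slab_seq_start() and
 * slab_seq_next() above are iterator callbacks in the standard Linux
 * seq_file pattern, so a matching table must also supply ->show and
 * ->stop hooks.  The 'slab_seq_show' name is assumed to mirror the
 * start/next pair; the ->stop hook releases spl_kmem_cache_sem taken
 * in slab_seq_start().
 */
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};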
int
highbit(unsigned long i)
{
	register int h = 1;
	SENTRY;

	if (i == 0)
		SRETURN(0);
#if BITS_PER_LONG == 64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	/* Binary search for the top set bit, halving the span each step */
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	SRETURN(h);
}
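/*
 * Hedged usage sketch (not from the SPL source): highbit() returns the
 * 1-based index of the highest set bit, matching the semantics of the
 * Linux fls() helper over the range of a long: highbit(0) == 0,
 * highbit(1) == 1, highbit(0x80) == 8.
 */
static void
highbit_example(void)
{
	ASSERT(highbit(0) == 0);
	ASSERT(highbit(1) == 1);	/* bit 0 set -> index 1 */
	ASSERT(highbit(0x80) == 8);	/* bit 7 set -> index 8 */
	ASSERT(highbit(0x100) == 9);	/* only the top set bit counts */
}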