__isl_give isl_id *isl_id_alloc(isl_ctx *ctx, const char *name, void *user)
{
	struct isl_hash_table_entry *entry;
	uint32_t id_hash;
	struct isl_name_and_user nu = { name, user };

	if (!ctx)
		return NULL;

	id_hash = isl_hash_init();
	if (name)
		id_hash = isl_hash_string(id_hash, name);
	else
		id_hash = isl_hash_builtin(id_hash, user);
	entry = isl_hash_table_find(ctx, &ctx->id_table, id_hash,
					isl_id_has_name_and_user, &nu, 1);
	if (!entry)
		return NULL;
	if (entry->data)
		return isl_id_copy(entry->data);
	entry->data = id_alloc(ctx, name, user);
	if (!entry->data)
		ctx->id_table.n--;
	return entry->data;
}
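/*
 * Usage sketch (not part of the isl sources): isl_id_alloc() interns ids,
 * so a second call with the same (name, user) pair returns a fresh
 * reference to the cached id via isl_id_copy().  Each call therefore hands
 * the caller one reference that must be dropped with isl_id_free().  The
 * calls below are the public isl API; "id_alloc_demo" is an illustrative
 * name.
 */
#include <isl/ctx.h>
#include <isl/id.h>

static int id_alloc_demo(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	isl_id *a, *b;

	if (!ctx)
		return -1;
	a = isl_id_alloc(ctx, "N", NULL);	/* allocates and caches */
	b = isl_id_alloc(ctx, "N", NULL);	/* cache hit: copy of a */
	isl_id_free(a);				/* one free per reference */
	isl_id_free(b);
	isl_ctx_free(ctx);
	return 0;
}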
static int devfs_create(struct vfs_node *parent, const char *name,
			uint32_t type, struct vfs_node **np)
{
	int rc = -1;
	struct vfs_node *n;
	struct devfs_node *dn;

	ASSERT(parent->type == VFS_DIRECTORY);

	/* Creating directories is not supported on the device file system */
	if (!np || type == VFS_DIRECTORY) {
		rc = EINVAL;
		goto out;
	}

	DEBUG(DL_DBG, ("create(%s), type(%d).\n", name, type));

	rc = devfs_node_create(&dn);
	if (rc != 0) {
		goto out;
	}

	n = vfs_node_alloc(parent->mount, type, parent->ops, NULL);
	if (!n) {
		/* NOTE: dn allocated above is leaked on this path */
		rc = ENOMEM;
		goto out;
	}

	/* Add to our devfs nodes; assumes name buffers hold 128 bytes */
	strncpy(dn->name, name, 127);
	dn->name[127] = '\0';	/* strncpy does not guarantee termination */
	dn->type = type;
	dn->ino = id_alloc();
	dn->mask = 0755;
	dn->dev_id = 0;

	strncpy(n->name, name, 127);
	n->name[127] = '\0';
	n->ino = dn->ino;
	n->length = 0;
	n->mask = 0755;
	n->data = NULL;

	vfs_node_refer(n);
	*np = n;
	rc = 0;

out:
	return rc;
}
int devfs_register(devfs_handle_t dir, const char *name, int flags,
		   void *ops, dev_t dev_id)
{
	int rc = -1;
	uint32_t type;
	struct devfs_node *dn;
	struct vfs_node *n = NULL;

	n = (struct vfs_node *)dir;
	if (n->type != VFS_DIRECTORY) {
		rc = EINVAL;
		DEBUG(DL_INF, ("register device on non-directory, node(%s:%d).\n",
			       n->name, n->type));
		goto out;
	}

	if (strcmp(n->mount->type->name, "devfs") != 0) {
		rc = EINVAL;
		DEBUG(DL_INF, ("register device on non-devfs, node(%s), fstype(%s).\n",
			       n->name, n->mount->type->name));
		goto out;
	}

	type = VFS_CHARDEVICE;	/* TODO: type should be retrieved from parameter */

	rc = devfs_node_create(&dn);
	if (rc != 0) {
		DEBUG(DL_WRN, ("create devfs node failed, node(%s), name(%s).\n",
			       n->name, name));
		goto out;
	}

	strncpy(dn->name, name, 127);
	dn->name[127] = '\0';	/* strncpy does not guarantee termination */
	dn->type = type;
	dn->ino = id_alloc();
	dn->mask = 0755;
	dn->dev_id = dev_id;

	rc = 0;

out:
	return rc;
}
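/*
 * A hypothetical registration sketch, not from this source tree: it only
 * shows the call shape of devfs_register() above.  "devfs_root" and
 * "null_dev_ops" are assumed to exist and are illustrative names; the
 * flags and dev_id values are placeholders.
 */
extern devfs_handle_t devfs_root;	/* assumed handle of /dev */
extern void *null_dev_ops;		/* assumed device operations table */

static int null_dev_init(void)
{
	int rc;

	rc = devfs_register(devfs_root, "null", 0, null_dev_ops, 0);
	if (rc != 0) {
		DEBUG(DL_WRN, ("register null device failed, rc(%d).\n", rc));
	}
	return rc;
}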
/*
 * Initialize the log structure for a new zone.
 */
static void *
log_zoneinit(zoneid_t zoneid)
{
	int i;
	log_zone_t *lzp;

	if (zoneid == GLOBAL_ZONEID)
		lzp = &log_global;	/* use statically allocated struct */
	else
		lzp = kmem_zalloc(sizeof (log_zone_t), KM_SLEEP);

	for (i = 0; i < LOG_NUMCLONES; i++) {
		lzp->lz_clones[i].log_minor =
		    (minor_t)id_alloc(log_minorspace);
		lzp->lz_clones[i].log_zoneid = zoneid;
	}
	return (lzp);
}
/*
 * Allocate a log device corresponding to supplied device type.
 * Both devices are clonable.  /dev/log devices are allocated per zone.
 * /dev/conslog devices are allocated from kmem cache.
 */
log_t *
log_alloc(minor_t type)
{
	zone_t *zptr = curproc->p_zone;
	log_zone_t *lzp;
	log_t *lp;
	int i;
	minor_t minor;

	if (type == LOG_CONSMIN) {
		/*
		 * Return a write-only /dev/conslog device.
		 * No point allocating log_t until there's a free minor number.
		 */
		minor = (minor_t)id_alloc(log_minorspace);
		lp = kmem_cache_alloc(log_cons_cache, KM_SLEEP);
		lp->log_minor = minor;
		return (lp);
	} else {
		ASSERT(type == LOG_LOGMIN);

		lzp = zone_getspecific(log_zone_key, zptr);
		ASSERT(lzp != NULL);

		/* search for an available /dev/log device for the zone */
		for (i = LOG_LOGMINIDX; i <= LOG_LOGMAXIDX; i++) {
			lp = &lzp->lz_clones[i];
			if (lp->log_inuse == 0)
				break;
		}
		if (i > LOG_LOGMAXIDX)
			lp = NULL;
		else
			/* Indicate which device type */
			lp->log_major = LOG_LOGMIN;
		return (lp);
	}
}
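/*
 * A hedged sketch of the matching release path (illustrative, not the
 * actual illumos close path verbatim): every minor number taken from
 * log_minorspace with id_alloc() must be returned with id_free() when the
 * clone device closes, or the minor space is eventually exhausted.  Shown
 * here for the /dev/conslog case allocated from log_cons_cache above.
 */
static void
log_free_sketch(log_t *lp)
{
	/* return the minor number before freeing the log_t itself */
	id_free(log_minorspace, lp->log_minor);
	kmem_cache_free(log_cons_cache, lp);
}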
/*
 * void task_init(void)
 *
 * Overview
 *   task_init() initializes task-related hashes, caches, and the task id
 *   space.  Additionally, task_init() establishes p0 as a member of task0.
 *   Called by main().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   task_init() must be called prior to MP startup.
 */
void
task_init(void)
{
	proc_t *p = &p0;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	/*
	 * Initialize task_cache and taskid_space.
	 */
	task_cache = kmem_cache_create("task_cache", sizeof (task_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	taskid_space = id_space_create("taskid_space", 0, MAX_TASKID);

	/*
	 * Initialize task hash table.
	 */
	task_hash = mod_hash_create_idhash("task_hash", task_hash_size,
	    mod_hash_null_valdtor);

	/*
	 * Initialize task-based rctls.
	 */
	rc_task_lwps = rctl_register("task.max-lwps", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_lwps_ops);
	rc_task_nprocs = rctl_register("task.max-processes", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_procs_ops);
	rc_task_cpu_time = rctl_register("task.max-cpu-time", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_DENY_NEVER |
	    RCTL_GLOBAL_CPU_TIME | RCTL_GLOBAL_INFINITE |
	    RCTL_GLOBAL_UNOBSERVABLE | RCTL_GLOBAL_SECONDS, UINT64_MAX,
	    UINT64_MAX, &task_cpu_time_ops);

	/*
	 * Create task0 and place p0 in it as a member.
	 */
	task0p = kmem_cache_alloc(task_cache, KM_SLEEP);
	bzero(task0p, sizeof (task_t));

	task0p->tk_tkid = id_alloc(taskid_space);
	task0p->tk_usage = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_proj = project_hold_by_id(0, &zone0, PROJECT_HOLD_INSERT);
	task0p->tk_flags = TASK_NORMAL;
	task0p->tk_nlwps = p->p_lwpcnt;
	task0p->tk_nprocs = 1;
	task0p->tk_zone = global_zone;
	task0p->tk_commit_next = NULL;

	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_TASK);
	mutex_enter(&curproc->p_lock);
	e.rcep_p.task = task0p;
	e.rcep_t = RCENTITY_TASK;
	task0p->tk_rctls = rctl_set_init(RCENTITY_TASK, curproc, &e, set, gp);
	mutex_exit(&curproc->p_lock);
	rctl_prealloc_destroy(gp);

	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(task0p->tk_tkid, GLOBAL_ZONEID) == NULL);
	if (mod_hash_insert_reserve(task_hash,
	    (mod_hash_key_t)(uintptr_t)task0p->tk_tkid,
	    (mod_hash_val_t *)task0p, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", task0p->tk_tkid,
		    (void *)task0p);
	}
	mutex_exit(&task_hash_lock);

	task0p->tk_memb_list = p;

	task0p->tk_nprocs_kstat = task_kstat_create(task0p, task0p->tk_zone);

	/*
	 * Initialize task pointers for p0, including doubly linked list of
	 * task members.
	 */
	p->p_task = task0p;
	p->p_taskprev = p->p_tasknext = p;
	task_hold(task0p);
}
/*
 * task_t *task_create(projid_t, zone_t *)
 *
 * Overview
 *   A process constructing a new task calls task_create() to construct and
 *   preinitialize the task for the appropriate destination project.  Only
 *   one task, the primordial task0, is not created with task_create().
 *
 * Return values
 *   Pointer to the newly created task.
 *
 * Caller's context
 *   Caller's context should be safe for KM_SLEEP allocations.
 *   The caller should appropriately bump the kpj_ntasks counter on the
 *   project that contains this task.
 */
task_t *
task_create(projid_t projid, zone_t *zone)
{
	task_t *tk = kmem_cache_alloc(task_cache, KM_SLEEP);
	task_t *ancestor_tk;
	taskid_t tkid;
	task_usage_t *tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	mod_hash_hndl_t hndl;
	rctl_set_t *set = rctl_set_create();
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	bzero(tk, sizeof (task_t));
	tk->tk_tkid = tkid = id_alloc(taskid_space);
	tk->tk_nlwps = 0;
	tk->tk_nlwps_ctl = INT_MAX;
	tk->tk_nprocs = 0;
	tk->tk_nprocs_ctl = INT_MAX;
	tk->tk_usage = tu;
	tk->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	tk->tk_proj = project_hold_by_id(projid, zone, PROJECT_HOLD_INSERT);
	tk->tk_flags = TASK_NORMAL;
	tk->tk_commit_next = NULL;

	/*
	 * Copy ancestor task's resource controls.
	 */
	zone_task_hold(zone);
	mutex_enter(&curproc->p_lock);
	ancestor_tk = curproc->p_task;
	task_hold(ancestor_tk);
	tk->tk_zone = zone;
	mutex_exit(&curproc->p_lock);

	for (;;) {
		gp = rctl_set_dup_prealloc(ancestor_tk->tk_rctls);

		mutex_enter(&ancestor_tk->tk_rctls->rcs_lock);

		if (rctl_set_dup_ready(ancestor_tk->tk_rctls, gp))
			break;

		mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

		rctl_prealloc_destroy(gp);
	}

	/*
	 * At this point, curproc does not have the appropriate linkage
	 * through the task to the project.  So, rctl_set_dup should only
	 * copy the rctls, and leave the callbacks for later.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	tk->tk_rctls = rctl_set_dup(ancestor_tk->tk_rctls, curproc, curproc,
	    &e, set, gp, RCD_DUP);
	mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

	rctl_prealloc_destroy(gp);

	/*
	 * Record the ancestor task's ID for use by extended accounting.
	 */
	tu->tu_anctaskid = ancestor_tk->tk_tkid;
	task_rele(ancestor_tk);

	/*
	 * Put new task structure in the hash table.
	 */
	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(tkid, zone->zone_id) == NULL);
	if (mod_hash_insert_reserve(task_hash, (mod_hash_key_t)(uintptr_t)tkid,
	    (mod_hash_val_t *)tk, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", tkid, (void *)tk);
	}
	mutex_exit(&task_hash_lock);

	tk->tk_nprocs_kstat = task_kstat_create(tk, zone);

	return (tk);
}
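/*
 * A hedged teardown counterpart (illustrative, not the illumos
 * task_destroy()/task_rele() path verbatim): each tk_tkid handed out by
 * id_alloc(taskid_space) in task_init() and task_create() must eventually
 * be removed from task_hash and returned with id_free(), or taskid_space
 * leaks ids up to MAX_TASKID.
 */
static void
task_id_release_sketch(task_t *tk)
{
	mutex_enter(&task_hash_lock);
	if (mod_hash_destroy(task_hash,
	    (mod_hash_key_t)(uintptr_t)tk->tk_tkid) != 0)
		panic("unable to delete task %d", tk->tk_tkid);
	mutex_exit(&task_hash_lock);

	id_free(taskid_space, tk->tk_tkid);
	kmem_cache_free(task_cache, tk);
}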