int main(int argc, char **argv) { int ch; char **pkgs, **start; char *pkgs_split; warnpkgng(); whead = malloc(sizeof(struct which_head)); if (whead == NULL) err(2, NULL); TAILQ_INIT(whead); pkgs = start = argv; if (argc == 1) { MatchType = MATCH_ALL; Flags = SHOW_INDEX; } else while ((ch = getopt_long(argc, argv, opts, longopts, NULL)) != -1) { switch(ch) { case 'a': MatchType = MATCH_ALL; break; case 'b': UseBlkSz = TRUE; break; case 'v': Verbose++; /* Reasonable definition of 'everything' */ Flags = SHOW_COMMENT | SHOW_DESC | SHOW_PLIST | SHOW_INSTALL | SHOW_DEINSTALL | SHOW_REQUIRE | SHOW_DISPLAY | SHOW_MTREE; break; case 'E': Flags |= SHOW_PKGNAME; break; case 'I': Flags |= SHOW_INDEX; break; case 'p': Flags |= SHOW_PREFIX; break; case 'c': Flags |= SHOW_COMMENT; break; case 'd': Flags |= SHOW_DESC; break; case 'D': Flags |= SHOW_DISPLAY; break; case 'f': Flags |= SHOW_PLIST; break; case 'g': Flags |= SHOW_CKSUM; break; case 'G': MatchType = MATCH_EXACT; break; case 'i': Flags |= SHOW_INSTALL; break; case 'j': Flags |= SHOW_REQUIRE; break; case 'k': Flags |= SHOW_DEINSTALL; break; case 'K': KeepPackage = TRUE; break; case 'r': Flags |= SHOW_DEPEND; break; case 'R': Flags |= SHOW_REQBY; break; case 'L': Flags |= SHOW_FILES; break; case 'm': Flags |= SHOW_MTREE; break; case 's': Flags |= SHOW_SIZE; break; case 'o': Flags |= SHOW_ORIGIN; break; case 'O': LookUpOrigin = strdup(optarg); if (LookUpOrigin == NULL) err(2, NULL); break; case 'V': Flags |= SHOW_FMTREV; break; case 'l': InfoPrefix = optarg; break; case 'q': Quiet = TRUE; break; case 'Q': Quiet = TRUE; QUIET = TRUE; break; case 't': strlcpy(PlayPen, optarg, sizeof(PlayPen)); break; case 'x': MatchType = MATCH_REGEX; break; case 'X': MatchType = MATCH_EREGEX; break; case 'e': CheckPkg = optarg; break; case 'W': { struct which_entry *entp; entp = calloc(1, sizeof(struct which_entry)); if (entp == NULL) err(2, NULL); strlcpy(entp->file, optarg, PATH_MAX); entp->skip = FALSE; TAILQ_INSERT_TAIL(whead, entp, next); break; } case 'P': Flags = SHOW_PTREV; break; case 'h': default: usage(); break; } } argc -= optind; argv += optind; if (Flags & SHOW_PTREV) { if (!Quiet) printf("Package tools revision: "); printf("%d\n", PKG_INSTALL_VERSION); exit(0); } /* Set some reasonable defaults */ if (!Flags) Flags = SHOW_COMMENT | SHOW_DESC | SHOW_REQBY; /* Get all the remaining package names, if any */ while (*argv) { /* * Don't try to apply heuristics if arguments are regexs or if * the argument refers to an existing file. */ if (MatchType != MATCH_REGEX && MatchType != MATCH_EREGEX && !isfile(*argv) && !isURL(*argv)) while ((pkgs_split = strrchr(*argv, (int)'/')) != NULL) { *pkgs_split++ = '\0'; /* * If character after the '/' is alphanumeric or shell * metachar, then we've found the package name. Otherwise * we've come across a trailing '/' and need to continue our * quest. */ if (isalnum(*pkgs_split) || ((MatchType == MATCH_GLOB) && \ strpbrk(pkgs_split, "*?[]") != NULL)) { *argv = pkgs_split; break; } } *pkgs++ = *argv++; } /* If no packages, yelp */ if (pkgs == start && MatchType != MATCH_ALL && !CheckPkg && TAILQ_EMPTY(whead) && LookUpOrigin == NULL) warnx("missing package name(s)"), usage(); *pkgs = NULL; return pkg_perform(start); }
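The which_head list above uses the <sys/queue.h> TAILQ macros that recur throughout these routines. A minimal, self-contained sketch of the same pattern follows; the type and field names are illustrative, not the pkg_install ones.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    int value;
    TAILQ_ENTRY(entry) link;        /* linkage embedded in each node */
};
TAILQ_HEAD(entry_head, entry);

int
main(void)
{
    struct entry_head head;
    struct entry *e;
    int i;

    TAILQ_INIT(&head);
    for (i = 0; i < 3; i++) {
        e = calloc(1, sizeof(*e));
        if (e == NULL)
            return (1);
        e->value = i;
        TAILQ_INSERT_TAIL(&head, e, link);
    }
    TAILQ_FOREACH(e, &head, link)
        printf("%d\n", e->value);
    /* drain: always take the head until the list is empty */
    while ((e = TAILQ_FIRST(&head)) != NULL) {
        TAILQ_REMOVE(&head, e, link);
        free(e);
    }
    return (0);
}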
void req_recv_done(struct context *ctx, struct conn *conn, struct msg *msg, struct msg *nmsg) { rstatus_t status; struct server_pool *pool; struct msg_tqh frag_msgq; struct msg *sub_msg; struct msg *tmsg; /* tmp next message */ ASSERT(conn->client && !conn->proxy); ASSERT(msg->request); ASSERT(msg->owner == conn); ASSERT(conn->rmsg == msg); ASSERT(nmsg == NULL || nmsg->request); /* enqueue next message (request), if any */ conn->rmsg = nmsg; if (req_filter(ctx, conn, msg)) { return; } if (msg->noforward) { status = req_make_reply(ctx, conn, msg); if (status != NC_OK) { conn->err = errno; return; } status = msg->reply(ctx, msg); if (status != NC_OK) { conn->err = errno; return; } status = event_add_out(ctx->evb, conn); if (status != NC_OK) { conn->err = errno; } return; } /* do fragment */ pool = conn->owner; TAILQ_INIT(&frag_msgq); status = msg->fragment(msg, pool->ncontinuum, &frag_msgq); if (status != NC_OK) { if (!msg->noreply) { conn->enqueue_outq(ctx, conn, msg); } req_forward_error(ctx, conn, msg); } /* if no fragment happened */ if (TAILQ_EMPTY(&frag_msgq)) { req_forward(ctx, conn, msg); return; } status = req_make_reply(ctx, conn, msg); if (status != NC_OK) { if (!msg->noreply) { conn->enqueue_outq(ctx, conn, msg); } req_forward_error(ctx, conn, msg); } for (sub_msg = TAILQ_FIRST(&frag_msgq); sub_msg != NULL; sub_msg = tmsg) { tmsg = TAILQ_NEXT(sub_msg, m_tqe); TAILQ_REMOVE(&frag_msgq, sub_msg, m_tqe); req_forward(ctx, conn, sub_msg); } ASSERT(TAILQ_EMPTY(&frag_msgq)); return; }
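The fragment-draining loop above saves TAILQ_NEXT into tmsg before removing sub_msg; a generic sketch of that idiom (names here are illustrative, not nutcracker's):

#include <sys/queue.h>

struct msg_node {
    int id;
    TAILQ_ENTRY(msg_node) m_tqe;
};
TAILQ_HEAD(msg_tqh, msg_node);

/*
 * Remove and process every element.  TAILQ_NEXT must be read *before*
 * TAILQ_REMOVE, exactly as req_recv_done() does with sub_msg/tmsg: once
 * a node is unlinked, its m_tqe pointers are no longer valid for
 * traversal.
 */
static void
drain(struct msg_tqh *q, void (*process)(struct msg_node *))
{
    struct msg_node *n, *next;

    for (n = TAILQ_FIRST(q); n != NULL; n = next) {
        next = TAILQ_NEXT(n, m_tqe);
        TAILQ_REMOVE(q, n, m_tqe);
        process(n);
    }
}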
int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { pthread_mutex_t pmutex; enum pthread_mutextype type = 0; int protocol = 0; int ceiling = 0; long flags = 0; int ret = 0; if (mutex == NULL) ret = EINVAL; /* Check if default mutex attributes: */ else if (mutex_attr == NULL || *mutex_attr == NULL) { /* Default to a (error checking) POSIX mutex: */ type = PTHREAD_MUTEX_ERRORCHECK; protocol = PTHREAD_PRIO_NONE; ceiling = PTHREAD_MAX_PRIORITY; flags = 0; } /* Check mutex type: */ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX)) /* Return an invalid argument error: */ ret = EINVAL; /* Check mutex protocol: */ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT)) /* Return an invalid argument error: */ ret = EINVAL; else { /* Use the requested mutex type and protocol: */ type = (*mutex_attr)->m_type; protocol = (*mutex_attr)->m_protocol; ceiling = (*mutex_attr)->m_ceiling; flags = (*mutex_attr)->m_flags; } /* Check no errors so far: */ if (ret == 0) { if ((pmutex = (pthread_mutex_t) malloc(sizeof(struct pthread_mutex))) == NULL) ret = ENOMEM; else { /* Set the mutex flags: */ pmutex->m_flags = flags; /* Process according to mutex type: */ switch (type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: /* Nothing to do here. */ break; /* Single UNIX Spec 2 recursive mutex: */ case PTHREAD_MUTEX_RECURSIVE: /* Reset the mutex count: */ pmutex->m_data.m_count = 0; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } if (ret == 0) { /* Initialise the rest of the mutex: */ TAILQ_INIT(&pmutex->m_queue); pmutex->m_flags |= MUTEX_FLAGS_INITED; pmutex->m_owner = NULL; pmutex->m_type = type; pmutex->m_protocol = protocol; pmutex->m_refcount = 0; if (protocol == PTHREAD_PRIO_PROTECT) pmutex->m_prio = ceiling; else pmutex->m_prio = 0; pmutex->m_saved_prio = 0; _MUTEX_INIT_LINK(pmutex); _SPINLOCK_INIT(&pmutex->lock); *mutex = pmutex; } else { free((void *)pmutex); *mutex = NULL; } } } /* Return the completion status: */ return(ret); }
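A minimal caller-side sketch of the attribute path handled above, requesting a recursive mutex through standard POSIX calls:

#include <pthread.h>
#include <stdio.h>

/*
 * Request a recursive mutex via an attribute object, the caller-side
 * counterpart of the m_type handling above.  Return values are checked
 * because pthread functions report errors as return codes, not errno.
 */
int
main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t lock;
    int ret;

    if ((ret = pthread_mutexattr_init(&attr)) != 0 ||
        (ret = pthread_mutexattr_settype(&attr,
        PTHREAD_MUTEX_RECURSIVE)) != 0 ||
        (ret = pthread_mutex_init(&lock, &attr)) != 0) {
        fprintf(stderr, "mutex setup failed: %d\n", ret);
        return (1);
    }
    pthread_mutexattr_destroy(&attr);

    /* Recursive type: the same thread may lock twice without deadlock. */
    pthread_mutex_lock(&lock);
    pthread_mutex_lock(&lock);
    pthread_mutex_unlock(&lock);
    pthread_mutex_unlock(&lock);

    pthread_mutex_destroy(&lock);
    return (0);
}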
static int vdpau_codec_create(media_codec_t *mc, const media_codec_params_t *mcp, media_pipe_t *mp) { VdpDecoderProfile profile; vdpau_dev_t *vd = mp->mp_vdpau_dev; VdpStatus r; int refframes; AVCodec *codec; if(vd == NULL) return 1; if(mcp == NULL || mcp->width == 0 || mcp->height == 0) return 1; switch(mc->codec_id) { case CODEC_ID_MPEG1VIDEO: profile = VDP_DECODER_PROFILE_MPEG1; codec = avcodec_find_decoder_by_name("mpegvideo_vdpau"); refframes = 2; break; case CODEC_ID_MPEG2VIDEO: profile = VDP_DECODER_PROFILE_MPEG2_MAIN; codec = avcodec_find_decoder_by_name("mpegvideo_vdpau"); refframes = 2; break; case CODEC_ID_H264: profile = VDP_DECODER_PROFILE_H264_HIGH; codec = avcodec_find_decoder_by_name("h264_vdpau"); refframes = 16; break; #if 0 // Seems broken case CODEC_ID_VC1: profile = VDP_DECODER_PROFILE_VC1_ADVANCED; mc->codec = avcodec_find_decoder_by_name("vc1_vdpau"); refframes = 16; break; case CODEC_ID_WMV3: profile = VDP_DECODER_PROFILE_VC1_MAIN; mc->codec = avcodec_find_decoder_by_name("wmv3_vdpau"); refframes = 16; break; #endif default: return 1; } if(codec == NULL) return -1; vdpau_codec_t *vc = calloc(1, sizeof(vdpau_codec_t)); TAILQ_INIT(&vc->vc_vvs_alloc); TAILQ_INIT(&vc->vc_vvs_free); vc->vc_vd = vd; vc->vc_width = mcp->width; if(mcp->height == 1088) vc->vc_height = 1080; else vc->vc_height = mcp->height; vc->vc_profile = profile; vc->vc_refframes = refframes; r = vd->vdp_decoder_create(vd->vd_dev, vc->vc_profile, vc->vc_width, vc->vc_height, vc->vc_refframes, &vc->vc_decoder); if(r != VDP_STATUS_OK) { TRACE(TRACE_INFO, "VDPAU", "Unable to create decoder: %s", vdpau_errstr(vd, r)); vc_destroy(vc); return -1; } r = vdpau_create_buffers(vc, vc->vc_width, vc->vc_height, vc->vc_refframes + 5); if(r != VDP_STATUS_OK) { TRACE(TRACE_INFO, "VDPAU", "Unable to allocate decoding buffers"); vc_destroy(vc); return -1; } TRACE(TRACE_DEBUG, "VDPAU", "Decoder initialized"); mc->ctx = avcodec_alloc_context3(codec); if(mcp->extradata != NULL) { mc->ctx->extradata = calloc(1, mcp->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(mc->ctx->extradata, mcp->extradata, mcp->extradata_size); mc->ctx->extradata_size = mcp->extradata_size; } if(avcodec_open2(mc->ctx, codec, NULL) < 0) { free(mc->ctx); vc_destroy(vc); return -1; } mc->ctx->get_buffer = vdpau_get_buffer; mc->ctx->release_buffer = vdpau_release_buffer; mc->ctx->draw_horiz_band = vdpau_draw_horiz_band; mc->ctx->get_format = vdpau_get_pixfmt; mc->ctx->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD; mc->ctx->opaque = mc; mc->opaque = vc; mc->decode = vdpau_decode; mc->flush = video_flush_avctx; mc->close = vdpau_codec_close; mc->reinit = vdpau_codec_reinit; return 0; }
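For context, a minimal sketch of the generic libavcodec open sequence the function builds on (find decoder by name, allocate a context, open it). The helper name is hypothetical and this is an illustration of the era's avcodec_alloc_context3/avcodec_open2 API, not the Showtime code path.

#include <libavcodec/avcodec.h>

static AVCodecContext *
open_decoder_by_name(const char *name)
{
    AVCodec *codec;
    AVCodecContext *ctx;

    codec = avcodec_find_decoder_by_name(name);
    if (codec == NULL)
        return NULL;            /* decoder not compiled in */

    ctx = avcodec_alloc_context3(codec);
    if (ctx == NULL)
        return NULL;

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_free(ctx);
        return NULL;
    }
    return ctx;
}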
static void lka_imsg(struct imsgev *iev, struct imsg *imsg) { struct submit_status *ss; struct secret *secret; struct mapel *mapel; struct rule *rule; struct map *map; struct map *mp; void *tmp; if (imsg->hdr.type == IMSG_DNS_HOST || imsg->hdr.type == IMSG_DNS_MX || imsg->hdr.type == IMSG_DNS_PTR) { dns_async(iev, imsg->hdr.type, imsg->data); return; } if (iev->proc == PROC_MFA) { switch (imsg->hdr.type) { case IMSG_LKA_MAIL: ss = imsg->data; ss->code = 530; if (ss->u.maddr.user[0] == '\0' && ss->u.maddr.domain[0] == '\0') ss->code = 250; else if (lka_verify_mail(&ss->u.maddr)) ss->code = 250; imsg_compose_event(iev, IMSG_LKA_MAIL, 0, 0, -1, ss, sizeof *ss); return; case IMSG_LKA_RULEMATCH: ss = imsg->data; rule = ruleset_match(&ss->envelope); if (rule == NULL) ss->code = (errno == EAGAIN) ? 451 : 530; else ss->code = (rule->r_decision == R_ACCEPT) ? 250 : 530; imsg_compose_event(iev, IMSG_LKA_RULEMATCH, 0, 0, -1, ss, sizeof *ss); return; case IMSG_LKA_RCPT: lka_session(imsg->data); return; } } if (iev->proc == PROC_MTA) { switch (imsg->hdr.type) { case IMSG_LKA_SECRET: { struct map_credentials *map_credentials; secret = imsg->data; map = map_findbyname(secret->mapname); if (map == NULL) { log_warn("warn: lka: credentials map %s is missing", secret->mapname); imsg_compose_event(iev, IMSG_LKA_SECRET, 0, 0, -1, secret, sizeof *secret); return; } map_credentials = map_lookup(map->m_id, secret->host, K_CREDENTIALS); log_debug("debug: lka: %s credentials lookup (%d)", secret->host, map_credentials != NULL); secret->secret[0] = '\0'; if (map_credentials == NULL) log_warnx("warn: %s credentials not found", secret->host); else if (lka_encode_credentials(secret->secret, sizeof secret->secret, map_credentials) == 0) log_warnx("warn: %s credentials parse fail", secret->host); imsg_compose_event(iev, IMSG_LKA_SECRET, 0, 0, -1, secret, sizeof *secret); free(map_credentials); return; } } } if (iev->proc == PROC_PARENT) { switch (imsg->hdr.type) { case IMSG_CONF_START: env->sc_rules_reload = xcalloc(1, sizeof *env->sc_rules, "lka:sc_rules_reload"); env->sc_maps_reload = xcalloc(1, sizeof *env->sc_maps, "lka:sc_maps_reload"); TAILQ_INIT(env->sc_rules_reload); TAILQ_INIT(env->sc_maps_reload); return; case IMSG_CONF_RULE: rule = xmemdup(imsg->data, sizeof *rule, "lka:rule"); TAILQ_INSERT_TAIL(env->sc_rules_reload, rule, r_entry); return; case IMSG_CONF_MAP: map = xmemdup(imsg->data, sizeof *map, "lka:map"); TAILQ_INIT(&map->m_contents); TAILQ_INSERT_TAIL(env->sc_maps_reload, map, m_entry); tmp = env->sc_maps; env->sc_maps = env->sc_maps_reload; mp = map_open(map); if (mp == NULL) errx(1, "lka: could not open map \"%s\"", map->m_name); map_close(map, mp); env->sc_maps = tmp; return; case IMSG_CONF_RULE_SOURCE: rule = TAILQ_LAST(env->sc_rules_reload, rulelist); tmp = env->sc_maps; env->sc_maps = env->sc_maps_reload; rule->r_sources = map_findbyname(imsg->data); if (rule->r_sources == NULL) fatalx("lka: maps inconsistency"); env->sc_maps = tmp; return; case IMSG_CONF_MAP_CONTENT: map = TAILQ_LAST(env->sc_maps_reload, maplist); mapel = xmemdup(imsg->data, sizeof *mapel, "lka:mapel"); TAILQ_INSERT_TAIL(&map->m_contents, mapel, me_entry); return; case IMSG_CONF_END: if (env->sc_rules) purge_config(PURGE_RULES); if (env->sc_maps) purge_config(PURGE_MAPS); env->sc_rules = env->sc_rules_reload; env->sc_maps = env->sc_maps_reload; /* start fulfilling requests */ event_add(&env->sc_ievs[PROC_MTA]->ev, NULL); event_add(&env->sc_ievs[PROC_MFA]->ev, NULL); event_add(&env->sc_ievs[PROC_SMTP]->ev, NULL); return; case 
IMSG_CTL_VERBOSE: log_verbose(*(int *)imsg->data); return; case IMSG_PARENT_FORWARD_OPEN: lka_session_forward_reply(imsg->data, imsg->fd); return; } } if (iev->proc == PROC_CONTROL) { switch (imsg->hdr.type) { case IMSG_LKA_UPDATE_MAP: map = map_findbyname(imsg->data); if (map == NULL) { log_warnx("warn: lka: no such map \"%s\"", (char *)imsg->data); return; } map_update(map); return; } } errx(1, "lka_imsg: unexpected %s imsg", imsg_to_str(imsg->hdr.type)); }
/*
 * ex_aci --
 *    Append, change, insert in ex.
 */
static int
ex_aci(SCR *sp, EXCMD *cmdp, enum which cmd)
{
    CHAR_T *p, *t;
    GS *gp;
    TEXT *tp;
    TEXTH tiq[] = {{ 0 }};
    recno_t cnt = 0, lno;
    size_t len;
    u_int32_t flags;
    int need_newline;

    gp = sp->gp;
    NEEDFILE(sp, cmdp);

    /*
     * If doing a change, replace lines for as long as possible.  Then,
     * append more lines or delete remaining lines.  Changes to an empty
     * file are appends, inserts are the same as appends to the previous
     * line.
     *
     * !!!
     * Set the address to which we'll append.  We set sp->lno to this
     * address as well so that autoindent works correctly when we get
     * text from the user.
     */
    lno = cmdp->addr1.lno;
    sp->lno = lno;
    if ((cmd == CHANGE || cmd == INSERT) && lno != 0)
        --lno;

    /*
     * !!!
     * If the file isn't empty, cut changes into the unnamed buffer.
     */
    if (cmd == CHANGE && cmdp->addr1.lno != 0 &&
        (cut(sp, NULL, &cmdp->addr1, &cmdp->addr2, CUT_LINEMODE) ||
        del(sp, &cmdp->addr1, &cmdp->addr2, 1)))
        return (1);

    /*
     * !!!
     * Anything that was left after the command separator becomes part
     * of the inserted text.  Apparently, it was common usage to enter:
     *
     *    :g/pattern/append|stuff1
     *
     * and append the line of text "stuff1" to the lines containing the
     * pattern.  It was also historically legal to enter:
     *
     *    :append|stuff1
     *    stuff2
     *    .
     *
     * and the text on the ex command line would be appended as well as
     * the text inserted after it.  There was an historic bug, however:
     * the user had to enter *two* terminating lines (the '.' lines) to
     * terminate text input mode in this case.  This whole thing could
     * be taken too far, however.  Entering:
     *
     *    :append|stuff1\
     *    stuff2
     *    stuff3
     *    .
     *
     * i.e. mixing and matching the forms confused the historic vi, and,
     * not only did it take two terminating lines to terminate text input
     * mode, but the trailing backslashes were retained on the input.  We
     * match historic practice except that we discard the backslashes.
     *
     * Input lines specified on the ex command line are separated by
     * <newline>s.  If there is a trailing delimiter, an empty line was
     * inserted.  There may also be a leading delimiter, which is ignored
     * unless it's also a trailing delimiter.  It is possible to encounter
     * a termination line, i.e. a single '.', in a global command, but not
     * necessary if the text insert command was the last of the global
     * commands.
     */
    if (cmdp->save_cmdlen != 0) {
        for (p = cmdp->save_cmd, len = cmdp->save_cmdlen; len > 0; p = t) {
            for (t = p; len > 0 && t[0] != '\n'; ++t, --len);
            if (t != p || len == 0) {
                if (F_ISSET(sp, SC_EX_GLOBAL) &&
                    t - p == 1 && p[0] == '.') {
                    ++t;
                    if (len > 0)
                        --len;
                    break;
                }
                if (db_append(sp, 1, lno++, p, t - p))
                    return (1);
            }
            if (len != 0) {
                ++t;
                if (--len == 0 &&
                    db_append(sp, 1, lno++, NULL, 0))
                    return (1);
            }
        }

        /*
         * If there's any remaining text, we're in a global, and
         * there's more command to parse.
         *
         * !!!
         * We depend on the fact that non-global commands will eat the
         * rest of the command line as text input, and before getting
         * any text input from the user.  Otherwise, we'd have to save
         * off the command text before or during the call to the text
         * input function below.
         */
        if (len != 0)
            cmdp->save_cmd = t;
        cmdp->save_cmdlen = len;
    }

    if (F_ISSET(sp, SC_EX_GLOBAL)) {
        if ((sp->lno = lno) == 0 && db_exist(sp, 1))
            sp->lno = 1;
        return (0);
    }

    /*
     * If not in a global command, read from the terminal.
     *
     * If this code is called by vi, we want to reset the terminal and use
     * ex's line get routine.  It actually works fine if we use vi's get
     * routine, but it doesn't look as nice.  Maybe if we had a separate
     * window or something, but getting a line at a time looks awkward.
     * However, depending on the screen that we're using, that may not
     * be possible.
     */
    if (F_ISSET(sp, SC_VI)) {
        if (gp->scr_screen(sp, SC_EX)) {
            ex_wemsg(sp, cmdp->cmd->name, EXM_NOCANON);
            return (1);
        }

        /* If we're still in the vi screen, move out explicitly. */
        need_newline = !F_ISSET(sp, SC_SCR_EXWROTE);
        F_SET(sp, SC_SCR_EX | SC_SCR_EXWROTE);
        if (need_newline)
            (void)ex_puts(sp, "\n");

        /*
         * !!!
         * Users of historical versions of vi sometimes get confused
         * when they enter append mode, and can't seem to get out of
         * it.  Give them an informational message.
         */
        (void)ex_puts(sp,
            msg_cat(sp, "273|Entering ex input mode.", NULL));
        (void)ex_puts(sp, "\n");
        (void)ex_fflush(sp);
    }

    /*
     * Set input flags; the ! flag turns off autoindent for append,
     * change and insert.
     */
    LF_INIT(TXT_DOTTERM | TXT_NUMBER);
    if (!FL_ISSET(cmdp->iflags, E_C_FORCE) && O_ISSET(sp, O_AUTOINDENT))
        LF_SET(TXT_AUTOINDENT);
    if (O_ISSET(sp, O_BEAUTIFY))
        LF_SET(TXT_BEAUTIFY);

    /*
     * This code can't use the common screen TEXTH structure (sp->tiq),
     * as it may already be in use, e.g. ":append|s/abc/ABC/" would fail
     * as we are only halfway through the text when the append code fires.
     * Use a local structure instead.  (The ex code would have to use a
     * local structure except that we're guaranteed to have finished any
     * remaining characters in the common TEXTH structure when they were
     * inserted into the file, above.)
     */
    TAILQ_INIT(tiq);
    if (ex_txt(sp, tiq, 0, flags))
        return (1);

    TAILQ_FOREACH(tp, tiq, q) {
        if (db_append(sp, 1, lno++, tp->lb, tp->len))
            return (1);
        ++cnt;
    }

    /*
     * Set sp->lno to the final line number value (correcting for a
     * possible 0 value) as that's historically correct for the final
     * line value, whether or not the user entered any text.
     */
    if ((sp->lno = lno) == 0 && db_exist(sp, 1))
        sp->lno = 1;
    return (0);
}
/*
 * forkproc
 *
 * Description: Create a new process structure, given a parent process
 *              structure.
 *
 * Parameters:  parent_proc             The parent process
 *
 * Returns:     !NULL                   The new process structure
 *              NULL                    Error (insufficient free memory)
 *
 * Note:        When successful, the newly created process structure is
 *              partially initialized; if a caller needs to deconstruct the
 *              returned structure, they must call forkproc_free() to do so.
 */
proc_t
forkproc(proc_t parent_proc)
{
    proc_t child_proc;      /* Our new process */
    static int nextpid = 0, pidwrap = 0, nextpidversion = 0;
    static uint64_t nextuniqueid = 0;
    int error = 0;
    struct session *sessp;
    uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread());

    MALLOC_ZONE(child_proc, proc_t, sizeof *child_proc, M_PROC, M_WAITOK);
    if (child_proc == NULL) {
        printf("forkproc: M_PROC zone exhausted\n");
        goto bad;
    }
    /* zero it out as we need to insert in hash */
    bzero(child_proc, sizeof *child_proc);

    MALLOC_ZONE(child_proc->p_stats, struct pstats *,
        sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK);
    if (child_proc->p_stats == NULL) {
        printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n");
        FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
        child_proc = NULL;
        goto bad;
    }
    MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *,
        sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK);
    if (child_proc->p_sigacts == NULL) {
        printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n");
        FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
        FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
        child_proc = NULL;
        goto bad;
    }

    /* allocate a callout for use by interval timers */
    child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc);
    if (child_proc->p_rcall == NULL) {
        FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS);
        FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS);
        FREE_ZONE(child_proc, sizeof *child_proc, M_PROC);
        child_proc = NULL;
        goto bad;
    }

    /*
     * Find an unused PID.
     */
    proc_list_lock();

    nextpid++;
retry:
    /*
     * If the process ID prototype has wrapped around,
     * restart somewhat above 0, as the low-numbered procs
     * tend to include daemons that don't exit.
     */
    if (nextpid >= PID_MAX) {
        nextpid = 100;
        pidwrap = 1;
    }
    if (pidwrap != 0) {
        /* the pid stays in the hash for both zombie and running state */
        if (pfind_locked(nextpid) != PROC_NULL) {
            nextpid++;
            goto retry;
        }
        if (pgfind_internal(nextpid) != PGRP_NULL) {
            nextpid++;
            goto retry;
        }
        if (session_find_internal(nextpid) != SESSION_NULL) {
            nextpid++;
            goto retry;
        }
    }
    nprocs++;
    child_proc->p_pid = nextpid;
    child_proc->p_idversion = nextpidversion++;
    /* kernel process is handcrafted and not from fork, so start from 1 */
    child_proc->p_uniqueid = ++nextuniqueid;
#if 1
    if (child_proc->p_pid != 0) {
        if (pfind_locked(child_proc->p_pid) != PROC_NULL)
            panic("proc in the list already\n");
    }
#endif
    /* Insert in the hash */
    child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE);
    LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash);
    proc_list_unlock();

    /*
     * We've identified the PID we are going to use; initialize the new
     * process structure.
     */
    child_proc->p_stat = SIDL;
    child_proc->p_pgrpid = PGRPID_DEAD;

    /*
     * The zeroing of the proc was done at allocation time due to the
     * need for insertion to the hash.  Copy the section that is to be
     * copied directly from the parent.
     */
    bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy,
        (unsigned)((caddr_t)&child_proc->p_endcopy -
        (caddr_t)&child_proc->p_startcopy));

    /*
     * Some flags are inherited from the parent.
     * Duplicate sub-structures as needed.
     * Increase reference counts on shared objects.
     * The p_stats and p_sigacts substructs are set in vm_fork.
     */
#if !CONFIG_EMBEDDED
    child_proc->p_flag = (parent_proc->p_flag &
        (P_LP64 | P_TRANSLATED | P_AFFINITY | P_DISABLE_ASLR | P_DELAYIDLESLEEP));
#else /* !CONFIG_EMBEDDED */
    child_proc->p_flag = (parent_proc->p_flag &
        (P_LP64 | P_TRANSLATED | P_AFFINITY | P_DISABLE_ASLR));
#endif /* !CONFIG_EMBEDDED */
    if (parent_proc->p_flag & P_PROFIL)
        startprofclock(child_proc);
#if !CONFIG_EMBEDDED
    if (child_proc->p_legacy_behavior & PROC_LEGACY_BEHAVIOR_IOTHROTTLE) {
        throttle_legacy_process_incr();
    }
#endif
    /*
     * Note that if the current thread has an assumed identity, this
     * credential will be granted to the new process.
     */
    child_proc->p_ucred = kauth_cred_get_with_ref();
    /* update cred on proc */
    PROC_UPDATE_CREDS_ONPROC(child_proc);
    /* update audit session proc count */
    AUDIT_SESSION_PROCNEW(child_proc);

#if CONFIG_FINE_LOCK_GROUPS
    lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr);
    lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
#if CONFIG_DTRACE
    lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
#endif
    lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr);
#else /* !CONFIG_FINE_LOCK_GROUPS */
    lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr);
    lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr);
#if CONFIG_DTRACE
    lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr);
#endif
    lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr);
#endif /* !CONFIG_FINE_LOCK_GROUPS */
    klist_init(&child_proc->p_klist);

    if (child_proc->p_textvp != NULLVP) {
        /* bump references to the text vnode */
        /* Need to hold iocount across the ref call */
        if (vnode_getwithref(child_proc->p_textvp) == 0) {
            error = vnode_ref(child_proc->p_textvp);
            vnode_put(child_proc->p_textvp);
            if (error != 0)
                child_proc->p_textvp = NULLVP;
        }
    }

    /*
     * Copy the parent's per-process open file table to the child; if
     * there is a per-thread current working directory, set the child's
     * per-process current working directory to that instead of the
     * parent's.
     *
     * XXX may fail to copy descriptors to child
     */
    child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir);

#if SYSV_SHM
    if (parent_proc->vm_shm) {
        /* XXX may fail to attach shm to child */
        (void)shmfork(parent_proc, child_proc);
    }
#endif
    /*
     * inherit the limit structure to child
     */
    proc_limitfork(parent_proc, child_proc);

    if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
        uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur;
        child_proc->p_rlim_cpu.tv_sec =
            (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur;
    }

    /* Initialize new process stats, including start time */
    /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */
    bzero(&child_proc->p_stats->pstat_startzero,
        (unsigned)((caddr_t)&child_proc->p_stats->pstat_endzero -
        (caddr_t)&child_proc->p_stats->pstat_startzero));
    bzero(&child_proc->p_stats->user_p_prof, sizeof(struct user_uprof));
    microtime(&child_proc->p_start);
    child_proc->p_stats->p_start = child_proc->p_start;     /* for compat */

    if (parent_proc->p_sigacts != NULL)
        (void)memcpy(child_proc->p_sigacts, parent_proc->p_sigacts,
            sizeof *child_proc->p_sigacts);
    else
        (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts);

    sessp = proc_session(parent_proc);
    if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT)
        OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag);
    session_rele(sessp);

    /*
     * block all signals to reach the process.
     * no transition race should be occurring with the child yet,
     * but indicate that the process is in (the creation) transition.
     */
    proc_signalstart(child_proc, 0);
    proc_transstart(child_proc, 0);

    child_proc->p_pcaction = (parent_proc->p_pcaction) & P_PCMAX;
    TAILQ_INIT(&child_proc->p_uthlist);
    TAILQ_INIT(&child_proc->p_aio_activeq);
    TAILQ_INIT(&child_proc->p_aio_doneq);

    /* Inherit the parent flags for code sign */
    child_proc->p_csflags = (parent_proc->p_csflags & ~CS_KILLED);

    /*
     * All processes have work queue locks; cleaned up by
     * reap_child_locked()
     */
    workqueue_init_lock(child_proc);

    /*
     * Copy work queue information
     *
     * Note: This should probably only happen in the case where we are
     *       creating a child that is a copy of the parent; since this
     *       routine is called in the non-duplication case of vfork()
     *       or posix_spawn(), then this information should likely not
     *       be duplicated.
     *
     * <rdar://6640553> Work queue pointers that no longer point to code
     */
    child_proc->p_wqthread = parent_proc->p_wqthread;
    child_proc->p_threadstart = parent_proc->p_threadstart;
    child_proc->p_pthsize = parent_proc->p_pthsize;
    child_proc->p_targconc = parent_proc->p_targconc;
    if ((parent_proc->p_lflag & P_LREGISTER) != 0) {
        child_proc->p_lflag |= P_LREGISTER;
    }
    child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset;
#if PSYNCH
    pth_proc_hashinit(child_proc);
#endif /* PSYNCH */

#if CONFIG_LCTX
    child_proc->p_lctx = NULL;
    /* Add new process to login context (if any). */
    if (parent_proc->p_lctx != NULL) {
        /*
         * <rdar://6640564> This should probably be delayed in the
         * vfork() or posix_spawn() cases.
         */
        LCTX_LOCK(parent_proc->p_lctx);
        enterlctx(child_proc, parent_proc->p_lctx, 0);
    }
#endif

    /* Default to no tracking of dirty state */
    child_proc->p_dirty = 0;

bad:
    return (child_proc);
}
int main(int argc, char *argv[]) { int error __unused, i, r, s; FILE *pidf; int ch = 0; while ((ch = getopt(argc, argv, "d")) != -1) { switch(ch) { case 'd': debugopt = 1; break; default: usage(); /* NOT REACHED */ } } argc -= optind; argv += optind; TAILQ_INIT(&pdev_array_list); TAILQ_INIT(&udev_monitor_list); r = ignore_signal(SIGPIPE); if (r != 0) err(1, "could not ignore_signal SIGPIPE"); r = pthread_mutex_init(&(monitor_lock), NULL); if (r != 0) err(1, "could not allocate a pthread_mutex"); if ((udevfd = open(UDEV_DEVICE_PATH, O_RDWR | O_NONBLOCK)) == -1) err(1, "%s", UDEV_DEVICE_PATH); unblock_descriptor(udevfd); s = init_local_server(LISTEN_SOCKET_FILE, SOCK_STREAM, 0); if (s < 0) err(1, "init_local_server"); pidf = fopen("/var/run/udevd.pid", "w"); #if 0 if (pidf == NULL) err(1, "pidfile"); #endif set_signal(SIGTERM, killed); set_signal(SIGHUP, hangup); if (debugopt == 0) if (daemon(0, 0) == -1) err(1, "daemon"); if (pidf != NULL) { fprintf(pidf, "%ld\n", (long)getpid()); fclose(pidf); } syslog(LOG_ERR, "udevd started"); pdev_array_entry_insert(udev_getdevs(udevfd)); memset(fds, 0 , sizeof(fds)); fds[UDEV_DEVICE_FD_IDX].fd = udevfd; fds[UDEV_DEVICE_FD_IDX].events = POLLIN; fds[UDEV_SOCKET_FD_IDX].fd = s; fds[UDEV_SOCKET_FD_IDX].events = POLLIN | POLLPRI; for (;;) { r = poll(fds, NFDS, -1); if (r < 0) { if (hangup_ongoing == 0) { if (errno == EINTR) { usleep(5000); continue; } else { err(1, "polling..."); } } else { usleep(20000); /* 20 ms */ continue; } } for (i = 0; (i < NFDS) && (r > 0); i++) { if (fds[i].revents == 0) continue; --r; switch (i) { case UDEV_DEVICE_FD_IDX: udev_read_event(udevfd); break; case UDEV_SOCKET_FD_IDX: handle_new_connection(s); break; default: break; } } } syslog(LOG_ERR, "udevd is exiting normally"); return 0; }
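A stripped-down userland model of the event loop above: block in poll(), restart on EINTR, and dispatch on revents. Reading stdin stands in for the udev and socket descriptors.

#include <poll.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
    struct pollfd fds[1];
    char buf[256];
    ssize_t n;
    int r;

    fds[0].fd = STDIN_FILENO;
    fds[0].events = POLLIN;

    for (;;) {
        r = poll(fds, 1, -1);
        if (r < 0) {
            if (errno == EINTR)
                continue;       /* interrupted by a signal */
            perror("poll");
            return 1;
        }
        if (fds[0].revents & (POLLIN | POLLHUP)) {
            n = read(fds[0].fd, buf, sizeof(buf));
            if (n <= 0)
                return 0;       /* EOF or error: stop */
            (void)write(STDOUT_FILENO, buf, (size_t)n);
        }
    }
}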
int
main(int argc, char *argv[])
{
    int ch;
    int retval;
    char *inputfilename;
    scope_t *sentinel;

    STAILQ_INIT(&patches);
    SLIST_INIT(&search_path);
    STAILQ_INIT(&seq_program);
    TAILQ_INIT(&cs_tailq);
    SLIST_INIT(&scope_stack);

    /* Set Sentinel scope node */
    sentinel = scope_alloc();
    sentinel->type = SCOPE_ROOT;

    includes_search_curdir = 1;
    appname = *argv;
    regfile = NULL;
    listfile = NULL;
#if DEBUG
    yy_flex_debug = 0;
    mm_flex_debug = 0;
    yydebug = 0;
    mmdebug = 0;
#endif
    while ((ch = getopt(argc, argv, "d:i:l:n:o:p:r:I:X")) != -1) {
        switch(ch) {
        case 'd':
#if DEBUG
            if (strcmp(optarg, "s") == 0) {
                yy_flex_debug = 1;
                mm_flex_debug = 1;
            } else if (strcmp(optarg, "p") == 0) {
                yydebug = 1;
                mmdebug = 1;
            } else {
                fprintf(stderr, "%s: -d Requires either an "
                    "'s' or 'p' argument\n", appname);
                usage();
            }
#else
            stop("-d: Assembler not built with debugging "
                 "information", EX_SOFTWARE);
#endif
            break;
        case 'i':
            stock_include_file = optarg;
            break;
        case 'l':
            /* Create a program listing */
            if ((listfile = fopen(optarg, "w")) == NULL) {
                perror(optarg);
                stop(NULL, EX_CANTCREAT);
            }
            listfilename = optarg;
            break;
        case 'n':
            /* Don't complain about the -nostdinc directive */
            if (strcmp(optarg, "ostdinc")) {
                fprintf(stderr, "%s: Unknown option -%c%s\n",
                    appname, ch, optarg);
                usage();
                /* NOTREACHED */
            }
            break;
        case 'o':
            if ((ofile = fopen(optarg, "w")) == NULL) {
                perror(optarg);
                stop(NULL, EX_CANTCREAT);
            }
            ofilename = optarg;
            break;
        case 'p':
            /* Create Register Diagnostic "printing" Functions */
            if ((regdiagfile = fopen(optarg, "w")) == NULL) {
                perror(optarg);
                stop(NULL, EX_CANTCREAT);
            }
            regdiagfilename = optarg;
            break;
        case 'r':
            if ((regfile = fopen(optarg, "w")) == NULL) {
                perror(optarg);
                stop(NULL, EX_CANTCREAT);
            }
            regfilename = optarg;
            break;
        case 'I':
        {
            path_entry_t include_dir;

            if (strcmp(optarg, "-") == 0) {
                if (includes_search_curdir == 0) {
                    fprintf(stderr, "%s: Warning - '-I-' "
                        "specified multiple times\n", appname);
                }
                includes_search_curdir = 0;
                for (include_dir = SLIST_FIRST(&search_path);
                     include_dir != NULL;
                     include_dir = SLIST_NEXT(include_dir, links))
                    /*
                     * All entries before a '-I-' only
                     * apply to includes specified with
                     * quotes instead of "<>".
                     */
                    include_dir->quoted_includes_only = 1;
            } else {
                include_dir =
                    (path_entry_t)malloc(sizeof(*include_dir));
                if (include_dir == NULL) {
                    perror(optarg);
                    stop(NULL, EX_OSERR);
                }
                include_dir->directory = strdup(optarg);
                if (include_dir->directory == NULL) {
                    perror(optarg);
                    stop(NULL, EX_OSERR);
                }
                include_dir->quoted_includes_only = 0;
                SLIST_INSERT_HEAD(&search_path, include_dir, links);
            }
            break;
        }
        case 'X':
            /* icc version of -nostdinc */
            break;
        case '?':
        default:
            usage();
            /* NOTREACHED */
        }
    }
    argc -= optind;
    argv += optind;

    if (argc != 1) {
        fprintf(stderr, "%s: No input file specified\n", appname);
        usage();
        /* NOTREACHED */
    }

    if (regdiagfile != NULL &&
        (regfile == NULL || stock_include_file == NULL)) {
        fprintf(stderr,
            "%s: The -p option requires the -r and -i options.\n",
            appname);
        usage();
        /* NOTREACHED */
    }
    symtable_open();
    inputfilename = *argv;
    include_file(*argv, SOURCE_FILE);
    retval = yyparse();
    if (retval == 0) {
        if (SLIST_FIRST(&scope_stack) == NULL ||
            SLIST_FIRST(&scope_stack)->type != SCOPE_ROOT) {
            stop("Unterminated conditional expression", EX_DATAERR);
            /* NOTREACHED */
        }

        /* Process the outermost scope */
        process_scope(SLIST_FIRST(&scope_stack));

        /*
         * Descend the tree of scopes and insert/emit
         * patches as appropriate.  We perform a depth first
         * traversal, recursively handling each scope.
         */
        /* start at the root scope */
        dump_scope(SLIST_FIRST(&scope_stack));

        /* Patch up forward jump addresses */
        back_patch();

        if (ofile != NULL)
            output_code();
        if (regfile != NULL)
            symtable_dump(regfile, regdiagfile);
        if (listfile != NULL)
            output_listing(inputfilename);
    }

    stop(NULL, 0);
    /* NOTREACHED */
    return (0);
}
int vdpau_codec_create(media_codec_t *mc, enum CodecID id, AVCodecContext *ctx, media_codec_params_t *mcp, media_pipe_t *mp) { VdpDecoderProfile profile; vdpau_dev_t *vd = mp->mp_vdpau_dev; VdpStatus r; int refframes; if(vd == NULL) return 1; if(mcp->width == 0 || mcp->height == 0) return 1; switch(id) { case CODEC_ID_MPEG1VIDEO: profile = VDP_DECODER_PROFILE_MPEG1; mc->codec = avcodec_find_decoder_by_name("mpegvideo_vdpau"); refframes = 2; break; case CODEC_ID_MPEG2VIDEO: profile = VDP_DECODER_PROFILE_MPEG2_MAIN; mc->codec = avcodec_find_decoder_by_name("mpegvideo_vdpau"); refframes = 2; break; case CODEC_ID_H264: profile = VDP_DECODER_PROFILE_H264_HIGH; mc->codec = avcodec_find_decoder_by_name("h264_vdpau"); refframes = 16; break; #if 0 // Seems broken case CODEC_ID_VC1: profile = VDP_DECODER_PROFILE_VC1_ADVANCED; mc->codec = avcodec_find_decoder_by_name("vc1_vdpau"); refframes = 16; break; case CODEC_ID_WMV3: profile = VDP_DECODER_PROFILE_VC1_MAIN; mc->codec = avcodec_find_decoder_by_name("wmv3_vdpau"); refframes = 16; break; #endif default: return 1; } if(mc->codec == NULL) return -1; vdpau_codec_t *vc = calloc(1, sizeof(vdpau_codec_t)); TAILQ_INIT(&vc->vc_vvs_alloc); TAILQ_INIT(&vc->vc_vvs_free); vc->vc_vd = vd; vc->vc_width = mcp->width; vc->vc_height = mcp->height; vc->vc_profile = profile; vc->vc_refframes = refframes; r = vd->vdp_decoder_create(vd->vd_dev, vc->vc_profile, vc->vc_width, vc->vc_height, vc->vc_refframes, &vc->vc_decoder); if(r != VDP_STATUS_OK) { TRACE(TRACE_INFO, "VDPAU", "Unable to create decoder: %s", vdpau_errstr(vd, r)); vc_destroy(vc); return -1; } r = vdpau_create_buffers(vc, vc->vc_width, vc->vc_height, vc->vc_refframes + 5); if(r != VDP_STATUS_OK) { TRACE(TRACE_INFO, "VDPAU", "Unable to allocate decoding buffers"); vc_destroy(vc); return -1; } TRACE(TRACE_DEBUG, "VDPAU", "Decoder initialized"); mc->codec_ctx = ctx ?: avcodec_alloc_context(); mc->codec_ctx->codec_id = mc->codec->id; mc->codec_ctx->codec_type = mc->codec->type; if(avcodec_open(mc->codec_ctx, mc->codec) < 0) { if(ctx == NULL) free(mc->codec_ctx); mc->codec = NULL; vc_destroy(vc); return -1; } mc->codec_ctx->get_buffer = vdpau_get_buffer; mc->codec_ctx->release_buffer = vdpau_release_buffer; mc->codec_ctx->draw_horiz_band = vdpau_draw_horiz_band; mc->codec_ctx->get_format = vdpau_get_pixfmt; mc->codec_ctx->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD; mc->codec_ctx->opaque = mc; mc->opaque = vc; mc->decode = vdpau_decode; mc->close = vdpau_codec_close; mc->reinit = vdpau_codec_reinit; return 0; }
void _glfwInitEventQueueMir(EventQueue* queue) { TAILQ_INIT(&queue->head); }
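For TAILQ_INIT(&queue->head) to compile, EventQueue.head must be a TAILQ_HEAD. A hypothetical definition consistent with that initializer follows; the node and field names are illustrative, not GLFW's actual declarations.

#include <sys/queue.h>

typedef struct EventNode {
    int event;                          /* payload would go here */
    TAILQ_ENTRY(EventNode) entries;
} EventNode;

typedef struct EventQueue {
    TAILQ_HEAD(, EventNode) head;
} EventQueue;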
/*
 * Attach/setup the common net80211 state.  Called by
 * the driver on attach, prior to creating any vap's.
 */
void
ieee80211_ifattach(struct ieee80211com *ic,
    const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
    struct ifnet *ifp = ic->ic_ifp;
    struct sockaddr_dl *sdl;
    struct ifaddr *ifa;

    KASSERT(ifp->if_type == IFT_IEEE80211, ("if_type %d", ifp->if_type));

    TAILQ_INIT(&ic->ic_vaps);

    /* Create a taskqueue for all state changes */
    ic->ic_tq = taskqueue_create("ic_taskq", M_WAITOK | M_ZERO,
        taskqueue_thread_enqueue, &ic->ic_tq);
    taskqueue_start_threads(&ic->ic_tq, 1, TDPRI_KERN_DAEMON, -1,
        "%s taskq", ifp->if_xname);
    /*
     * Fill in 802.11 available channel set, mark all
     * available channels as active, and pick a default
     * channel if not already specified.
     */
    ieee80211_media_init(ic);

    ic->ic_update_mcast = null_update_mcast;
    ic->ic_update_promisc = null_update_promisc;

    ic->ic_hash_key = karc4random();
    ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT;
    ic->ic_lintval = ic->ic_bintval;
    ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX;

    ieee80211_crypto_attach(ic);
    ieee80211_node_attach(ic);
    ieee80211_power_attach(ic);
    ieee80211_proto_attach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
    ieee80211_superg_attach(ic);
#endif
    ieee80211_ht_attach(ic);
    ieee80211_scan_attach(ic);
    ieee80211_regdomain_attach(ic);
    ieee80211_dfs_attach(ic);
    ieee80211_sysctl_attach(ic);

    ifp->if_addrlen = IEEE80211_ADDR_LEN;
    ifp->if_hdrlen = 0;

    if_attach(ifp, &wlan_global_serializer);

    ifp->if_mtu = IEEE80211_MTU_MAX;
    ifp->if_broadcastaddr = ieee80211broadcastaddr;
    ifp->if_output = null_output;
    ifp->if_input = null_input;         /* just in case */
    ifp->if_resolvemulti = NULL;        /* NB: callers check */

    ifa = ifaddr_byindex(ifp->if_index);
    KASSERT(ifa != NULL, ("%s: no lladdr!", __func__));
    sdl = (struct sockaddr_dl *)ifa->ifa_addr;
    sdl->sdl_type = IFT_ETHER;          /* XXX IFT_IEEE80211? */
    sdl->sdl_alen = IEEE80211_ADDR_LEN;
    IEEE80211_ADDR_COPY(LLADDR(sdl), macaddr);
    // IFAFREE(ifa);
}
void test_ofp_flow_reply_create_02(void) { lagopus_result_t ret = LAGOPUS_RESULT_ANY_FAILURES; struct flow_stats *flow_stats = NULL; struct match *match = NULL; const char *header_data[2] = { "04 13 ff b8 00 00 00 10 00 01 00 01 00 00 00 00 ", "04 13 00 58 00 00 00 10 00 01 00 00 00 00 00 00 " }; const char *body_data[2] = { "00 48 01 00 00 00 00 02 00 00 00 03 00 04 00 05 " "00 06 00 07 00 00 00 00 00 00 00 00 00 00 00 08 " "00 00 00 00 00 00 00 09 00 00 00 00 00 00 00 0a " "00 01 00 18 00 00 01 10 00 00 00 00 00 00 00 00 " "00 00 00 00 00 00 00 00", "00 48 01 00 00 00 00 02 00 00 00 03 00 04 00 05 " "00 06 00 07 00 00 00 00 00 00 00 00 00 00 00 08 " "00 00 00 00 00 00 00 09 00 00 00 00 00 00 00 0a " "00 01 00 18 00 00 01 10 00 00 00 00 00 00 00 00 " "00 00 00 00 00 00 00 00" }; size_t nums[2] = {909, 1}; int i; /* data */ TAILQ_INIT(&s_flow_stats_list); for (i = 0; i < 910; i++) { if ((flow_stats = s_flow_stats_alloc()) != NULL) { /* flow_stats = 48, match = 24, sum = 72 */ flow_stats->ofp.length = 0; flow_stats->ofp.table_id = 0x01; flow_stats->ofp.duration_sec = 0x02; flow_stats->ofp.duration_nsec = 0x03; flow_stats->ofp.priority = 0x04; flow_stats->ofp.idle_timeout = 0x05; flow_stats->ofp.hard_timeout = 0x06; flow_stats->ofp.flags = 0x07; flow_stats->ofp.cookie = 0x08; flow_stats->ofp.packet_count = 0x09; flow_stats->ofp.byte_count = 0x0a; if ((match = match_alloc(16)) != NULL) { match->oxm_class = 0x00; match->oxm_field = 0x01; match->oxm_length = 0x10; TAILQ_INSERT_TAIL(&(flow_stats->match_list), match, entry); } TAILQ_INSERT_TAIL(&s_flow_stats_list, flow_stats, entry); } else { TEST_FAIL_MESSAGE("allocation error."); } } ret = check_pbuf_list_across_packet_create(s_ofp_flow_reply_create_wrap, header_data, body_data, nums, 2); TEST_ASSERT_EQUAL_MESSAGE(LAGOPUS_RESULT_OK, ret, "create port 0 error."); /* free */ while ((flow_stats = TAILQ_FIRST(&s_flow_stats_list)) != NULL) { TAILQ_REMOVE(&s_flow_stats_list, flow_stats, entry); ofp_match_list_elem_free(&flow_stats->match_list); ofp_instruction_list_elem_free(&flow_stats->instruction_list); free(flow_stats); } }
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
    struct bond_dev_private *internals = NULL;
    struct rte_eth_dev *eth_dev = NULL;

    /* now do all data allocation - for eth_dev structure, dummy pci driver
     * and internal (private) data */
    if (name == NULL) {
        RTE_BOND_LOG(ERR, "Invalid name specified");
        goto err;
    }

    if (socket_id >= number_of_sockets()) {
        RTE_BOND_LOG(ERR,
            "Invalid socket id specified to create bonded device on.");
        goto err;
    }

    internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
    if (internals == NULL) {
        RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
        goto err;
    }

    /* reserve an ethdev entry */
    eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
    if (eth_dev == NULL) {
        RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
        goto err;
    }

    eth_dev->data->dev_private = internals;
    eth_dev->data->nb_rx_queues = (uint16_t)1;
    eth_dev->data->nb_tx_queues = (uint16_t)1;

    TAILQ_INIT(&(eth_dev->link_intr_cbs));

    eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

    eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
        socket_id);
    if (eth_dev->data->mac_addrs == NULL) {
        RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
        goto err;
    }

    eth_dev->data->dev_started = 0;
    eth_dev->data->promiscuous = 0;
    eth_dev->data->scattered_rx = 0;
    eth_dev->data->all_multicast = 0;

    eth_dev->dev_ops = &default_dev_ops;
    eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
        RTE_ETH_DEV_DETACHABLE;
    eth_dev->driver = NULL;
    eth_dev->data->kdrv = RTE_KDRV_NONE;
    eth_dev->data->drv_name = pmd_bond_driver_name;
    eth_dev->data->numa_node = socket_id;

    rte_spinlock_init(&internals->lock);

    internals->port_id = eth_dev->data->port_id;
    internals->mode = BONDING_MODE_INVALID;
    internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
    internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
    internals->xmit_hash = xmit_l2_hash;
    internals->user_defined_mac = 0;
    internals->link_props_set = 0;

    internals->link_status_polling_enabled = 0;

    internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
    internals->link_down_delay_ms = 0;
    internals->link_up_delay_ms = 0;

    internals->slave_count = 0;
    internals->active_slave_count = 0;
    internals->rx_offload_capa = 0;
    internals->tx_offload_capa = 0;
    internals->candidate_max_rx_pktlen = 0;
    internals->max_rx_pktlen = 0;

    /* Initially allow to choose any offload type */
    internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;

    memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
    memset(internals->slaves, 0, sizeof(internals->slaves));

    /* Set mode 4 default configuration */
    bond_mode_8023ad_setup(eth_dev, NULL);
    if (bond_ethdev_mode_set(eth_dev, mode)) {
        RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
            eth_dev->data->port_id, mode);
        goto err;
    }

    return eth_dev->data->port_id;

err:
    rte_free(internals);
    if (eth_dev != NULL) {
        rte_free(eth_dev->data->mac_addrs);
        rte_eth_dev_release_port(eth_dev);
    }
    return -1;
}
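A sketch of the caller's side, assuming rte_eal_init() has run and the slave ports are already probed: create the bonded device with the entry point above, then attach two existing ports with rte_eth_bond_slave_add(). The device name and port ids are illustrative.

#include <rte_eth_bond.h>
#include <rte_lcore.h>
#include <stdio.h>

static int
setup_bonding(uint8_t slave_a, uint8_t slave_b)
{
    int bond_port;

    /* create the bonded device on the local NUMA socket */
    bond_port = rte_eth_bond_create("net_bonding0",
        BONDING_MODE_ROUND_ROBIN, rte_socket_id());
    if (bond_port < 0) {
        printf("bond create failed\n");
        return -1;
    }
    if (rte_eth_bond_slave_add((uint8_t)bond_port, slave_a) != 0 ||
        rte_eth_bond_slave_add((uint8_t)bond_port, slave_b) != 0) {
        printf("slave add failed\n");
        return -1;
    }
    return bond_port;
}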
int main(int argc, char **argv) { char *envstr, **cmd_argv, **targv; int i, ret, cmd_argc; struct passwd *pw; struct stat st; char fpath[MAXPATHLEN]; tzset(); TAILQ_INIT(&cvs_variables); SLIST_INIT(&repo_locks); SLIST_INIT(&temp_files); hash_table_init(&created_directories, 100); hash_table_init(&created_cvs_directories, 100); /* check environment so command-line options override it */ if ((envstr = getenv("CVS_RSH")) != NULL) cvs_rsh = envstr; if (((envstr = getenv("CVSEDITOR")) != NULL) || ((envstr = getenv("VISUAL")) != NULL) || ((envstr = getenv("EDITOR")) != NULL)) cvs_editor = envstr; if ((envstr = getenv("CVSREAD")) != NULL) cvs_readonly = 1; if ((envstr = getenv("CVSREADONLYFS")) != NULL) { cvs_readonlyfs = 1; cvs_nolog = 1; } if ((cvs_homedir = getenv("HOME")) == NULL) { if ((pw = getpwuid(getuid())) != NULL) cvs_homedir = pw->pw_dir; } if ((envstr = getenv("TMPDIR")) != NULL) cvs_tmpdir = envstr; ret = cvs_getopt(argc, argv); argc -= ret; argv += ret; if (argc == 0) usage(); cmdp = cvs_findcmd(argv[0]); if (cmdp == NULL) { fprintf(stderr, "Unknown command: `%s'\n\n", argv[0]); fprintf(stderr, "CVS commands are:\n"); for (i = 0; cvs_cdt[i] != NULL; i++) fprintf(stderr, "\t%-16s%s\n", cvs_cdt[i]->cmd_name, cvs_cdt[i]->cmd_descr); exit(1); } /* * check the tmp dir, either specified through * the environment variable TMPDIR, or via * the global option -T <dir> */ if (stat(cvs_tmpdir, &st) == -1) fatal("stat failed on `%s': %s", cvs_tmpdir, strerror(errno)); else if (!S_ISDIR(st.st_mode)) fatal("`%s' is not valid temporary directory", cvs_tmpdir); if (cvs_readrc == 1 && cvs_homedir != NULL) { cvs_read_rcfile(); if (cvs_defargs != NULL) { if ((targv = cvs_makeargv(cvs_defargs, &i)) == NULL) fatal("failed to load default arguments to %s", __progname); cvs_getopt(i, targv); cvs_freeargv(targv, i); xfree(targv); } } /* setup signal handlers */ signal(SIGTERM, sighandler); signal(SIGINT, sighandler); signal(SIGHUP, sighandler); signal(SIGABRT, sighandler); signal(SIGALRM, sighandler); signal(SIGPIPE, sighandler); cvs_cmdop = cmdp->cmd_op; cmd_argc = cvs_build_cmd(&cmd_argv, argv, argc); cvs_file_init(); if (cvs_cmdop == CVS_OP_SERVER) { cmdp->cmd(cmd_argc, cmd_argv); cvs_cleanup(); return (0); } cvs_umask = umask(0); umask(cvs_umask); if ((current_cvsroot = cvsroot_get(".")) == NULL) { cvs_log(LP_ERR, "No CVSROOT specified! Please use the '-d' option"); fatal("or set the CVSROOT environment variable."); } if (current_cvsroot->cr_method != CVS_METHOD_LOCAL) { cmdp->cmd(cmd_argc, cmd_argv); cvs_cleanup(); return (0); } (void)xsnprintf(fpath, sizeof(fpath), "%s/%s", current_cvsroot->cr_dir, CVS_PATH_ROOT); if (stat(fpath, &st) == -1 && cvs_cmdop != CVS_OP_INIT) { if (errno == ENOENT) fatal("repository '%s' does not exist", current_cvsroot->cr_dir); else fatal("%s: %s", current_cvsroot->cr_dir, strerror(errno)); } else { if (!S_ISDIR(st.st_mode)) fatal("'%s' is not a directory", current_cvsroot->cr_dir); } if (cvs_cmdop != CVS_OP_INIT) { cvs_parse_configfile(); cvs_parse_modules(); } cmdp->cmd(cmd_argc, cmd_argv); cvs_cleanup(); return (0); }
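The handler block above uses signal(3), whose restart semantics vary historically across systems. A sigaction(2) equivalent with explicit flags is sketched below; sighandler is assumed to exist as in cvs' main().

#include <signal.h>
#include <string.h>

extern void sighandler(int);

static void
install_handlers(void)
{
    struct sigaction sa;
    int sigs[] = { SIGTERM, SIGINT, SIGHUP, SIGABRT, SIGALRM, SIGPIPE };
    size_t i;

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = sighandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART;   /* restart interrupted slow syscalls */
    for (i = 0; i < sizeof(sigs) / sizeof(sigs[0]); i++)
        (void)sigaction(sigs[i], &sa, NULL);
}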
static int vtblk_attach(device_t dev) { struct vtblk_softc *sc; struct virtio_blk_config blkcfg; int error; sc = device_get_softc(dev); sc->vtblk_dev = dev; lwkt_serialize_init(&sc->vtblk_slz); bioq_init(&sc->vtblk_bioq); TAILQ_INIT(&sc->vtblk_req_free); TAILQ_INIT(&sc->vtblk_req_ready); virtio_set_feature_desc(dev, vtblk_feature_desc); vtblk_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_BLK_F_RO)) sc->vtblk_flags |= VTBLK_FLAG_READONLY; if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER)) sc->vtblk_flags |= VTBLK_FLAG_BARRIER; if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE)) sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG; vtblk_setup_sysctl(sc); /* Get local copy of config. */ virtio_read_device_config(dev, 0, &blkcfg, sizeof(struct virtio_blk_config)); /* * With the current sglist(9) implementation, it is not easy * for us to support a maximum segment size as adjacent * segments are coalesced. For now, just make sure it's larger * than the maximum supported transfer size. */ if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) { if (blkcfg.size_max < MAXPHYS) { error = ENOTSUP; device_printf(dev, "host requires unsupported " "maximum segment size feature\n"); goto fail; } } sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg); if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) { error = EINVAL; device_printf(dev, "fewer than minimum number of segments " "allowed: %d\n", sc->vtblk_max_nsegs); goto fail; } /* * Allocate working sglist. The number of segments may be too * large to safely store on the stack. */ sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT); if (sc->vtblk_sglist == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate sglist\n"); goto fail; } error = vtblk_alloc_virtqueue(sc); if (error) { device_printf(dev, "cannot allocate virtqueue\n"); goto fail; } error = vtblk_alloc_requests(sc); if (error) { device_printf(dev, "cannot preallocate requests\n"); goto fail; } vtblk_alloc_disk(sc, &blkcfg); error = virtio_setup_intr(dev, &sc->vtblk_slz); if (error) { device_printf(dev, "cannot setup virtqueue interrupt\n"); goto fail; } virtqueue_enable_intr(sc->vtblk_vq); fail: if (error) vtblk_detach(dev); return (error); }
/* * This function and pthread_create() do a lot of the same things. * It'd be nice to consolidate the common stuff in one place. */ static void init_main_thread(struct pthread *thread) { struct sched_param sched_param; /* Setup the thread attributes. */ thr_self(&thread->tid); thread->attr = _pthread_attr_default; /* * Set up the thread stack. * * Create a red zone below the main stack. All other stacks * are constrained to a maximum size by the parameters * passed to mmap(), but this stack is only limited by * resource limits, so this stack needs an explicitly mapped * red zone to protect the thread stack that is just beyond. */ if (mmap(_usrstack - _thr_stack_initial - _thr_guard_default, _thr_guard_default, 0, MAP_ANON, -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); /* * Mark the stack as an application supplied stack so that it * isn't deallocated. * * XXX - I'm not sure it would hurt anything to deallocate * the main thread stack because deallocation doesn't * actually free() it; it just puts it in the free * stack queue for later reuse. */ thread->attr.stackaddr_attr = _usrstack - _thr_stack_initial; thread->attr.stacksize_attr = _thr_stack_initial; thread->attr.guardsize_attr = _thr_guard_default; thread->attr.flags |= THR_STACK_USER; /* * Write a magic value to the thread structure * to help identify valid ones: */ thread->magic = THR_MAGIC; thread->cancel_enable = 1; thread->cancel_async = 0; /* Initialize the mutex queue: */ TAILQ_INIT(&thread->mutexq); TAILQ_INIT(&thread->pp_mutexq); thread->state = PS_RUNNING; _thr_getscheduler(thread->tid, &thread->attr.sched_policy, &sched_param); thread->attr.prio = sched_param.sched_priority; #ifdef _PTHREAD_FORCED_UNWIND thread->unwind_stackend = _usrstack; #endif /* Others cleared to zero by thr_alloc() */ }
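A userland model of the red zone described above: reserve a region, then revoke all access to its lowest page so a stack running off the end faults immediately instead of silently corrupting the neighbouring mapping. This is a sketch of the general mmap/mprotect guard-page technique, not libthr's exact layout.

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    size_t size = 16 * (size_t)pagesz;
    char *region;

    region = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (region == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* The lowest page becomes the guard ("red zone"). */
    if (mprotect(region, (size_t)pagesz, PROT_NONE) == -1) {
        perror("mprotect");
        return 1;
    }
    /* region[0] would now fault; region[pagesz] is usable. */
    region[pagesz] = 1;
    munmap(region, size);
    return 0;
}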
pid_t proc_run(struct privsep *ps, struct privsep_proc *p, struct privsep_proc *procs, u_int nproc, void (*init)(struct privsep *, struct privsep_proc *, void *), void *arg) { pid_t pid; struct passwd *pw; const char *root; struct control_sock *rcs; u_int n; if (ps->ps_noaction) return (0); proc_open(ps, p, procs, nproc); /* Fork child handlers */ switch (pid = fork()) { case -1: fatal("proc_run: cannot fork"); case 0: /* Set the process group of the current process */ setpgid(0, 0); break; default: return (pid); } pw = ps->ps_pw; if (p->p_id == PROC_CONTROL && ps->ps_instance == 0) { if (control_init(ps, &ps->ps_csock) == -1) fatalx(p->p_title); TAILQ_FOREACH(rcs, &ps->ps_rcsocks, cs_entry) if (control_init(ps, rcs) == -1) fatalx(p->p_title); } /* Change root directory */ if (p->p_chroot != NULL) root = p->p_chroot; else root = pw->pw_dir; if (chroot(root) == -1) fatal("proc_run: chroot"); if (chdir("/") == -1) fatal("proc_run: chdir(\"/\")"); privsep_process = p->p_id; setproctitle("%s", p->p_title); if (setgroups(1, &pw->pw_gid) || setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) fatal("proc_run: cannot drop privileges"); /* Fork child handlers */ for (n = 1; n < ps->ps_instances[p->p_id]; n++) { if (fork() == 0) { ps->ps_instance = p->p_instance = n; break; } } #ifdef DEBUG log_debug("%s: %s %d/%d, pid %d", __func__, p->p_title, ps->ps_instance + 1, ps->ps_instances[p->p_id], getpid()); #endif event_init(); signal_set(&ps->ps_evsigint, SIGINT, proc_sig_handler, p); signal_set(&ps->ps_evsigterm, SIGTERM, proc_sig_handler, p); signal_set(&ps->ps_evsigchld, SIGCHLD, proc_sig_handler, p); signal_set(&ps->ps_evsighup, SIGHUP, proc_sig_handler, p); signal_set(&ps->ps_evsigpipe, SIGPIPE, proc_sig_handler, p); signal_set(&ps->ps_evsigusr1, SIGUSR1, proc_sig_handler, p); signal_add(&ps->ps_evsigint, NULL); signal_add(&ps->ps_evsigterm, NULL); signal_add(&ps->ps_evsigchld, NULL); signal_add(&ps->ps_evsighup, NULL); signal_add(&ps->ps_evsigpipe, NULL); signal_add(&ps->ps_evsigusr1, NULL); proc_listen(ps, procs, nproc); if (p->p_id == PROC_CONTROL && ps->ps_instance == 0) { TAILQ_INIT(&ctl_conns); if (control_listen(&ps->ps_csock) == -1) fatalx(p->p_title); TAILQ_FOREACH(rcs, &ps->ps_rcsocks, cs_entry) if (control_listen(rcs) == -1) fatalx(p->p_title); }
int main(int argc, char **argv) { int debug_flag = 0, log_level = SYSLOG_LEVEL_INFO; int opt, fopt_count = 0, j; char *tname, *cp, line[NI_MAXHOST]; FILE *fp; u_long linenum; extern int optind; extern char *optarg; __progname = ssh_get_progname(argv[0]); init_rng(); seed_rng(); TAILQ_INIT(&tq); /* Ensure that fds 0, 1 and 2 are open or directed to /dev/null */ sanitise_stdfd(); if (argc <= 1) usage(); while ((opt = getopt(argc, argv, "Hv46p:T:t:f:")) != -1) { switch (opt) { case 'H': hash_hosts = 1; break; case 'p': ssh_port = a2port(optarg); if (ssh_port <= 0) { fprintf(stderr, "Bad port '%s'\n", optarg); exit(1); } break; case 'T': timeout = convtime(optarg); if (timeout == -1 || timeout == 0) { fprintf(stderr, "Bad timeout '%s'\n", optarg); usage(); } break; case 'v': if (!debug_flag) { debug_flag = 1; log_level = SYSLOG_LEVEL_DEBUG1; } else if (log_level < SYSLOG_LEVEL_DEBUG3) log_level++; else fatal("Too high debugging level."); break; case 'f': if (strcmp(optarg, "-") == 0) optarg = NULL; argv[fopt_count++] = optarg; break; case 't': get_keytypes = 0; tname = strtok(optarg, ","); while (tname) { int type = key_type_from_name(tname); switch (type) { case KEY_RSA1: get_keytypes |= KT_RSA1; break; case KEY_DSA: get_keytypes |= KT_DSA; break; case KEY_ECDSA: get_keytypes |= KT_ECDSA; break; case KEY_RSA: get_keytypes |= KT_RSA; break; case KEY_UNSPEC: fatal("unknown key type %s", tname); } tname = strtok(NULL, ","); } break; case '4': IPv4or6 = AF_INET; break; case '6': IPv4or6 = AF_INET6; break; case '?': default: usage(); } } if (optind == argc && !fopt_count) usage(); log_init("ssh-keyscan", log_level, SYSLOG_FACILITY_USER, 1); maxfd = fdlim_get(1); if (maxfd < 0) fatal("%s: fdlim_get: bad value", __progname); if (maxfd > MAXMAXFD) maxfd = MAXMAXFD; if (MAXCON <= 0) fatal("%s: not enough file descriptors", __progname); if (maxfd > fdlim_get(0)) fdlim_set(maxfd); fdcon = xcalloc(maxfd, sizeof(con)); read_wait_nfdset = howmany(maxfd, NFDBITS); read_wait = xcalloc(read_wait_nfdset, sizeof(fd_mask)); for (j = 0; j < fopt_count; j++) { if (argv[j] == NULL) fp = stdin; else if ((fp = fopen(argv[j], "r")) == NULL) fatal("%s: %s: %s", __progname, argv[j], strerror(errno)); linenum = 0; while (read_keyfile_line(fp, argv[j] == NULL ? "(stdin)" : argv[j], line, sizeof(line), &linenum) != -1) { /* Chomp off trailing whitespace and comments */ if ((cp = strchr(line, '#')) == NULL) cp = line + strlen(line) - 1; while (cp >= line) { if (*cp == ' ' || *cp == '\t' || *cp == '\n' || *cp == '#') *cp-- = '\0'; else break; } /* Skip empty lines */ if (*line == '\0') continue; do_host(line); } if (ferror(fp)) fatal("%s: %s: %s", __progname, argv[j], strerror(errno)); fclose(fp); } while (optind < argc) do_host(argv[optind++]); while (ncon > 0) conloop(); return (0); }
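The -t handling above walks a comma-separated list with strtok(3); a minimal standalone sketch of that idiom. Note strtok modifies its argument, so the input must be a writable buffer (optarg is; a string literal would not be).

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char types[] = "rsa,dsa,ecdsa";
    char *tname;

    for (tname = strtok(types, ","); tname != NULL;
        tname = strtok(NULL, ","))
        printf("key type: %s\n", tname);
    return 0;
}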
void viomb_attach(struct device *parent, struct device *self, void *aux) { struct viomb_softc *sc = (struct viomb_softc *)self; struct virtio_softc *vsc = (struct virtio_softc *)parent; u_int32_t features; int i; if (vsc->sc_child != NULL) { printf("child already attached for %s; something wrong...\n", parent->dv_xname); return; } /* fail on non-4K page size archs */ if (VIRTIO_PAGE_SIZE != PAGE_SIZE){ printf("non-4K page size arch found, needs %d, got %d\n", VIRTIO_PAGE_SIZE, PAGE_SIZE); return; } sc->sc_virtio = vsc; vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE]; vsc->sc_nvqs = 0; vsc->sc_child = self; vsc->sc_ipl = IPL_BIO; vsc->sc_config_change = viomb_config_change; vsc->sc_intrhand = virtio_vq_intr; /* negotiate features */ features = VIRTIO_F_RING_INDIRECT_DESC; features = virtio_negotiate_features(vsc, features, viomb_feature_names); if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE, sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0)) goto err; vsc->sc_nvqs++; if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE, sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0)) goto err; vsc->sc_nvqs++; sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr; sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr; virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]); virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]); viomb_read_config(sc); TAILQ_INIT(&sc->sc_balloon_pages); if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ, PR_NOWAIT|PR_ZERO)) == NULL) { printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc)); goto err; } if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ, 1, sizeof(u_int32_t) * PGS_PER_REQ, 0, BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) { printf("%s: dmamap creation failed.\n", DEVNAME(sc)); goto err; } if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap, &sc->sc_req.bl_pages[0], sizeof(uint32_t) * PGS_PER_REQ, NULL, BUS_DMA_NOWAIT)) { printf("%s: dmamap load failed.\n", DEVNAME(sc)); goto err_dmamap; } sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO); if (sc->sc_taskq == NULL) goto err_dmamap; task_set(&sc->sc_task, viomb_worker, sc, NULL); printf("\n"); return; err_dmamap: bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap); err: if (sc->sc_req.bl_pages) dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ); for (i = 0; i < vsc->sc_nvqs; i++) virtio_free_vq(vsc, &sc->sc_vq[i]); vsc->sc_nvqs = 0; vsc->sc_child = VIRTIO_CHILD_ERROR; return; }
static int mprsas_add_device(struct mpr_softc *sc, u16 handle, u8 linkrate){ char devstring[80]; struct mprsas_softc *sassc; struct mprsas_target *targ; Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t config_page; uint64_t sas_address, parent_sas_address = 0; u32 device_info, parent_devinfo = 0; unsigned int id; int ret = 1, error = 0, i; struct mprsas_lun *lun; u8 is_SATA_SSD = 0; struct mpr_command *cm; sassc = sc->sassc; mprsas_startup_increment(sassc); if ((mpr_config_get_sas_device_pg0(sc, &mpi_reply, &config_page, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { printf("%s: error reading SAS device page0\n", __func__); error = ENXIO; goto out; } device_info = le32toh(config_page.DeviceInfo); if (((device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) && (le16toh(config_page.ParentDevHandle) != 0)) { Mpi2ConfigReply_t tmp_mpi_reply; Mpi2SasDevicePage0_t parent_config_page; if ((mpr_config_get_sas_device_pg0(sc, &tmp_mpi_reply, &parent_config_page, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, le16toh(config_page.ParentDevHandle)))) { printf("%s: error reading SAS device %#x page0\n", __func__, le16toh(config_page.ParentDevHandle)); } else { parent_sas_address = parent_config_page.SASAddress.High; parent_sas_address = (parent_sas_address << 32) | parent_config_page.SASAddress.Low; parent_devinfo = le32toh(parent_config_page.DeviceInfo); } } /* TODO Check proper endianness */ sas_address = config_page.SASAddress.High; sas_address = (sas_address << 32) | config_page.SASAddress.Low; mpr_dprint(sc, MPR_INFO, "SAS Address from SAS device page0 = %jx\n", sas_address); /* * Always get SATA Identify information because this is used to * determine if Start/Stop Unit should be sent to the drive when the * system is shutdown. */ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) { ret = mprsas_get_sas_address_for_sata_disk(sc, &sas_address, handle, device_info, &is_SATA_SSD); if (ret) { mpr_dprint(sc, MPR_ERROR, "%s: failed to get disk type " "(SSD or HDD) for SATA device with handle 0x%04x\n", __func__, handle); } else { mpr_dprint(sc, MPR_INFO, "SAS Address from SATA " "device = %jx\n", sas_address); } } id = mpr_mapping_get_sas_id(sc, sas_address, handle); if (id == MPR_MAP_BAD_ID) { printf("failure at %s:%d/%s()! Could not get ID for device " "with handle 0x%04x\n", __FILE__, __LINE__, __func__, handle); error = ENXIO; goto out; }
if (mprsas_check_id(sassc, id) != 0) { device_printf(sc->mpr_dev, "Excluding target id %d\n", id); error = ENXIO; goto out; } mpr_dprint(sc, MPR_MAPPING, "SAS Address from SAS device page0 = %jx\n", sas_address); targ = &sassc->targets[id]; targ->devinfo = device_info; targ->devname = le32toh(config_page.DeviceName.High); targ->devname = (targ->devname << 32) | le32toh(config_page.DeviceName.Low); targ->encl_handle = le16toh(config_page.EnclosureHandle); targ->encl_slot = le16toh(config_page.Slot); targ->encl_level = config_page.EnclosureLevel; targ->connector_name[0] = config_page.ConnectorName[0]; targ->connector_name[1] = config_page.ConnectorName[1]; targ->connector_name[2] = config_page.ConnectorName[2]; targ->connector_name[3] = config_page.ConnectorName[3]; targ->handle = handle; targ->parent_handle = le16toh(config_page.ParentDevHandle); targ->sasaddr = mpr_to_u64(&config_page.SASAddress); targ->parent_sasaddr = le64toh(parent_sas_address); targ->parent_devinfo = parent_devinfo; targ->tid = id; targ->linkrate = (linkrate>>4); targ->flags = 0; if (is_SATA_SSD) { targ->flags = MPR_TARGET_IS_SATA_SSD; } if (le16toh(config_page.Flags) & MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) { targ->scsi_req_desc_type = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; } if (le16toh(config_page.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { targ->encl_level_valid = TRUE; } TAILQ_INIT(&targ->commands); TAILQ_INIT(&targ->timedout_commands); while (!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPR); } SLIST_INIT(&targ->luns); mpr_describe_devinfo(targ->devinfo, devstring, 80); mpr_dprint(sc, (MPR_INFO|MPR_MAPPING), "Found device <%s> <%s> " "handle<0x%04x> enclosureHandle<0x%04x> slot %d\n", devstring, mpr_describe_table(mpr_linkrate_names, targ->linkrate), targ->handle, targ->encl_handle, targ->encl_slot); if (targ->encl_level_valid) { mpr_dprint(sc, (MPR_INFO|MPR_MAPPING), "At enclosure level %d " "and connector name (%4s)\n", targ->encl_level, targ->connector_name); } #if ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000039)) || \ (__FreeBSD_version < 902502) if ((sassc->flags & MPRSAS_IN_STARTUP) == 0) #endif mprsas_rescan_target(sc, targ); mpr_dprint(sc, MPR_MAPPING, "Target id 0x%x added\n", targ->tid); /* * Check all commands to see if the SATA_ID_TIMEOUT flag has been set. * If so, send a Target Reset TM to the target that was just created. * An Abort Task TM should be used instead of a Target Reset, but that * would be much more difficult because targets have not been fully * discovered yet, and LUNs haven't been set up. So, just reset the * target instead of the LUN. */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) { targ->timeouts++; cm->cm_state = MPR_CM_STATE_TIMEDOUT; if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { mpr_dprint(sc, MPR_INFO, "%s: sending Target " "Reset for stuck SATA identify command " "(cm = %p)\n", __func__, cm); targ->tm->cm_targ = targ; mprsas_send_reset(sc, targ->tm, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); } else { mpr_dprint(sc, MPR_ERROR, "Failed to allocate " "tm for Target Reset after SATA ID " "command timed out (cm %p)\n", cm); } /* * No need to check for more since the target is * already being reset. 
*/ break; } } out: /* * Free the commands that may not have been freed from the SATA ID call */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) { mpr_free_command(sc, cm); } } mprsas_startup_decrement(sassc); return (error); }
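/*
 * Sketch of the High/Low-to-64-bit address assembly used repeatedly above.
 * The usual idiom byte-swaps each little-endian half first (the driver's
 * TODO comment notes it skips that for SASAddress); make_sas_address() is
 * an illustrative helper, not part of the mpr driver.  le32toh() comes from
 * <sys/endian.h> on FreeBSD.
 */
#include <stdint.h>
#include <sys/endian.h>

static uint64_t
make_sas_address(uint32_t high_le, uint32_t low_le)
{
	uint64_t sas_address;

	sas_address = le32toh(high_le);			/* upper 32 bits */
	sas_address = (sas_address << 32) | le32toh(low_le);
	return (sas_address);
}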
#if YAJL_MAJOR >= 2 static int outputs_integer_cb(void *params_, long long val) { #else static int outputs_integer_cb(void *params_, long val) { #endif struct outputs_json_params *params = (struct outputs_json_params*) params_; if (!strcmp(params->cur_key, "current_workspace")) { params->outputs_walk->ws = (int) val; FREE(params->cur_key); return 1; } if (!strcmp(params->cur_key, "x")) { params->outputs_walk->rect.x = (int) val; FREE(params->cur_key); return 1; } if (!strcmp(params->cur_key, "y")) { params->outputs_walk->rect.y = (int) val; FREE(params->cur_key); return 1; } if (!strcmp(params->cur_key, "width")) { params->outputs_walk->rect.w = (int) val; FREE(params->cur_key); return 1; } if (!strcmp(params->cur_key, "height")) { params->outputs_walk->rect.h = (int) val; FREE(params->cur_key); return 1; } return 0; } /* * Parse a string (name) * */ #if YAJL_MAJOR >= 2 static int outputs_string_cb(void *params_, const unsigned char *val, size_t len) { #else static int outputs_string_cb(void *params_, const unsigned char *val, unsigned int len) { #endif struct outputs_json_params *params = (struct outputs_json_params*) params_; if (!strcmp(params->cur_key, "current_workspace")) { char *copy = malloc(sizeof(const unsigned char) * (len + 1)); strncpy(copy, (const char*) val, len); copy[len] = '\0'; char *end; errno = 0; long parsed_num = strtol(copy, &end, 10); if (errno == 0 && (end && *end == '\0')) params->outputs_walk->ws = parsed_num; free(copy); FREE(params->cur_key); return 1; } if (strcmp(params->cur_key, "name")) { return 0; } char *name = malloc(sizeof(const unsigned char) * (len + 1)); strncpy(name, (const char*) val, len); name[len] = '\0'; params->outputs_walk->name = name; FREE(params->cur_key); return 1; } /* * We hit the start of a json-map (rect or a new output) * */ static int outputs_start_map_cb(void *params_) { struct outputs_json_params *params = (struct outputs_json_params*) params_; i3_output *new_output = NULL; if (params->cur_key == NULL) { new_output = malloc(sizeof(i3_output)); new_output->name = NULL; new_output->ws = 0; memset(&new_output->rect, 0, sizeof(rect)); new_output->bar = XCB_NONE; new_output->workspaces = malloc(sizeof(struct ws_head)); TAILQ_INIT(new_output->workspaces); params->outputs_walk = new_output; return 1; } return 1; } /* * We hit the end of a map (rect or a new output) * */ static int outputs_end_map_cb(void *params_) { struct outputs_json_params *params = (struct outputs_json_params*) params_; /* FIXME: What is at the end of a rect? */ i3_output *target = get_output_by_name(params->outputs_walk->name); if (target == NULL) { SLIST_INSERT_HEAD(outputs, params->outputs_walk, slist); } else { target->active = params->outputs_walk->active; target->ws = params->outputs_walk->ws; target->rect = params->outputs_walk->rect; } return 1; } /* * Parse a key. 
* * Essentially we just save it in the parsing-state * */ #if YAJL_MAJOR >= 2 static int outputs_map_key_cb(void *params_, const unsigned char *keyVal, size_t keyLen) { #else static int outputs_map_key_cb(void *params_, const unsigned char *keyVal, unsigned keyLen) { #endif struct outputs_json_params *params = (struct outputs_json_params*) params_; FREE(params->cur_key); params->cur_key = malloc(sizeof(unsigned char) * (keyLen + 1)); strncpy(params->cur_key, (const char*) keyVal, keyLen); params->cur_key[keyLen] = '\0'; return 1; } /* A data structure to pass all these callbacks to yajl */ yajl_callbacks outputs_callbacks = { &outputs_null_cb, &outputs_boolean_cb, &outputs_integer_cb, NULL, NULL, &outputs_string_cb, &outputs_start_map_cb, &outputs_map_key_cb, &outputs_end_map_cb, NULL, NULL }; /* * Initialize the output list * */ void init_outputs() { outputs = malloc(sizeof(struct outputs_head)); SLIST_INIT(outputs); } /* * Start parsing the received json-string * */ void parse_outputs_json(char *json) { struct outputs_json_params params; params.outputs_walk = NULL; params.cur_key = NULL; params.json = json; yajl_handle handle; yajl_status state; #if YAJL_MAJOR < 2 yajl_parser_config parse_conf = { 0, 0 }; handle = yajl_alloc(&outputs_callbacks, &parse_conf, NULL, (void*) &params); #else handle = yajl_alloc(&outputs_callbacks, NULL, (void*) &params); #endif state = yajl_parse(handle, (const unsigned char*) json, strlen(json)); /* FIXME: Proper error handling for JSON parsing */ switch (state) { case yajl_status_ok: break; case yajl_status_client_canceled: #if YAJL_MAJOR < 2 case yajl_status_insufficient_data: #endif case yajl_status_error: ELOG("Could not parse outputs-reply!\n"); exit(EXIT_FAILURE); break; } yajl_free(handle); } /* * Returns the output with the given name * */ i3_output *get_output_by_name(char *name) { i3_output *walk; if (name == NULL) { return NULL; } SLIST_FOREACH(walk, outputs, slist) { if (!strcmp(walk->name, name)) { break; } } return walk; }
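/*
 * outputs_string_cb() above copies the yajl value into a NUL-terminated
 * buffer and accepts strtol()'s result only if errno stayed 0 and the whole
 * string was consumed.  The same validation idiom as a stand-alone helper
 * (parse_int is an illustrative name):
 */
#include <errno.h>
#include <stdlib.h>

static int
parse_int(const char *s, long *out)
{
	char *end;

	errno = 0;
	*out = strtol(s, &end, 10);
	if (errno != 0 || end == s || *end != '\0')
		return (-1);	/* overflow/underflow, empty input, or junk */
	return (0);
}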
/* add_redirect_rule2() : * create a rdr rule */ int add_redirect_rule2(const char * ifname, const char * rhost, unsigned short eport, const char * iaddr, unsigned short iport, int proto, const char * desc, unsigned int timestamp) { int r; struct pfioc_rule pcr; #ifndef PF_NEWSTYLE struct pfioc_pooladdr pp; struct pf_pooladdr *a; #endif if(dev<0) { syslog(LOG_ERR, "pf device is not open"); return -1; } r = 0; memset(&pcr, 0, sizeof(pcr)); strlcpy(pcr.anchor, anchor_name, MAXPATHLEN); #ifndef PF_NEWSTYLE memset(&pp, 0, sizeof(pp)); strlcpy(pp.anchor, anchor_name, MAXPATHLEN); if(ioctl(dev, DIOCBEGINADDRS, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCBEGINADDRS, ...): %m"); r = -1; } else { pcr.pool_ticket = pp.ticket; #else if(1) { pcr.rule.direction = PF_IN; /*pcr.rule.src.addr.type = PF_ADDR_NONE;*/ pcr.rule.src.addr.type = PF_ADDR_ADDRMASK; pcr.rule.dst.addr.type = PF_ADDR_ADDRMASK; pcr.rule.nat.addr.type = PF_ADDR_NONE; pcr.rule.rdr.addr.type = PF_ADDR_ADDRMASK; #endif #ifdef __APPLE__ pcr.rule.dst.xport.range.op = PF_OP_EQ; pcr.rule.dst.xport.range.port[0] = htons(eport); pcr.rule.dst.xport.range.port[1] = htons(eport); #else pcr.rule.dst.port_op = PF_OP_EQ; pcr.rule.dst.port[0] = htons(eport); pcr.rule.dst.port[1] = htons(eport); #endif #ifndef PF_NEWSTYLE pcr.rule.action = PF_RDR; #ifndef PF_ENABLE_FILTER_RULES pcr.rule.natpass = 1; #else pcr.rule.natpass = 0; #endif #else #ifndef PF_ENABLE_FILTER_RULES pcr.rule.action = PF_PASS; #else pcr.rule.action = PF_MATCH; #endif #endif pcr.rule.af = AF_INET; #ifdef USE_IFNAME_IN_RULES if(ifname) strlcpy(pcr.rule.ifname, ifname, IFNAMSIZ); #endif pcr.rule.proto = proto; pcr.rule.log = (GETFLAG(LOGPACKETSMASK))?1:0; /*logpackets;*/ #ifdef PFRULE_HAS_RTABLEID pcr.rule.rtableid = -1; /* first appeared in OpenBSD 4.0 */ #endif #ifdef PFRULE_HAS_ONRDOMAIN pcr.rule.onrdomain = -1; /* first appeared in OpenBSD 5.0 */ #endif pcr.rule.quick = 1; pcr.rule.keep_state = PF_STATE_NORMAL; if(tag) strlcpy(pcr.rule.tagname, tag, PF_TAG_NAME_SIZE); strlcpy(pcr.rule.label, desc, PF_RULE_LABEL_SIZE); if(rhost && rhost[0] != '\0' && rhost[0] != '*') { inet_pton(AF_INET, rhost, &pcr.rule.src.addr.v.a.addr.v4.s_addr); pcr.rule.src.addr.v.a.mask.v4.s_addr = htonl(INADDR_NONE); } #ifndef PF_NEWSTYLE pcr.rule.rpool.proxy_port[0] = iport; pcr.rule.rpool.proxy_port[1] = iport; TAILQ_INIT(&pcr.rule.rpool.list); a = calloc(1, sizeof(struct pf_pooladdr)); inet_pton(AF_INET, iaddr, &a->addr.v.a.addr.v4.s_addr); a->addr.v.a.mask.v4.s_addr = htonl(INADDR_NONE); TAILQ_INSERT_TAIL(&pcr.rule.rpool.list, a, entries); memcpy(&pp.addr, a, sizeof(struct pf_pooladdr)); if(ioctl(dev, DIOCADDADDR, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCADDADDR, ...): %m"); r = -1; } else { #else pcr.rule.rdr.proxy_port[0] = iport; pcr.rule.rdr.proxy_port[1] = iport; inet_pton(AF_INET, iaddr, &pcr.rule.rdr.addr.v.a.addr.v4.s_addr); pcr.rule.rdr.addr.v.a.mask.v4.s_addr = htonl(INADDR_NONE); if(1) { #endif pcr.action = PF_CHANGE_GET_TICKET; if(ioctl(dev, DIOCCHANGERULE, &pcr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCCHANGERULE, ...) PF_CHANGE_GET_TICKET: %m"); r = -1; } else { pcr.action = PF_CHANGE_ADD_TAIL; if(ioctl(dev, DIOCCHANGERULE, &pcr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCCHANGERULE, ...) PF_CHANGE_ADD_TAIL: %m"); r = -1; } } }
#ifndef PF_NEWSTYLE free(a); #endif } if(r == 0 && timestamp > 0) { struct timestamp_entry * tmp; tmp = malloc(sizeof(struct timestamp_entry)); if(tmp) { tmp->next = timestamp_list; tmp->timestamp = timestamp; tmp->eport = eport; tmp->protocol = (short)proto; timestamp_list = tmp; } } return r; } /* thanks to Seth Mos for this function */ int add_filter_rule2(const char * ifname, const char * rhost, const char * iaddr, unsigned short eport, unsigned short iport, int proto, const char * desc) { #ifndef PF_ENABLE_FILTER_RULES UNUSED(ifname); UNUSED(rhost); UNUSED(iaddr); UNUSED(eport); UNUSED(iport); UNUSED(proto); UNUSED(desc); return 0; #else int r; struct pfioc_rule pcr; #ifndef PF_NEWSTYLE struct pfioc_pooladdr pp; #endif #ifndef USE_IFNAME_IN_RULES UNUSED(ifname); #endif UNUSED(eport); if(dev<0) { syslog(LOG_ERR, "pf device is not open"); return -1; } r = 0; memset(&pcr, 0, sizeof(pcr)); strlcpy(pcr.anchor, anchor_name, MAXPATHLEN); #ifndef PF_NEWSTYLE memset(&pp, 0, sizeof(pp)); strlcpy(pp.anchor, anchor_name, MAXPATHLEN); if(ioctl(dev, DIOCBEGINADDRS, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCBEGINADDRS, ...): %m"); r = -1; } else { pcr.pool_ticket = pp.ticket; #else if(1) { #endif pcr.rule.dst.port_op = PF_OP_EQ; pcr.rule.dst.port[0] = htons(iport); pcr.rule.direction = PF_IN; pcr.rule.action = PF_PASS; pcr.rule.af = AF_INET; #ifdef USE_IFNAME_IN_RULES if(ifname) strlcpy(pcr.rule.ifname, ifname, IFNAMSIZ); #endif pcr.rule.proto = proto; pcr.rule.quick = (GETFLAG(PFNOQUICKRULESMASK))?0:1; pcr.rule.log = (GETFLAG(LOGPACKETSMASK))?1:0; /*logpackets;*/ /* see the discussion on the forum : * http://miniupnp.tuxfamily.org/forum/viewtopic.php?p=638 */ pcr.rule.flags = TH_SYN; pcr.rule.flagset = (TH_SYN|TH_ACK); #ifdef PFRULE_HAS_RTABLEID pcr.rule.rtableid = -1; /* first appeared in OpenBSD 4.0 */ #endif #ifdef PFRULE_HAS_ONRDOMAIN pcr.rule.onrdomain = -1; /* first appeared in OpenBSD 5.0 */ #endif pcr.rule.keep_state = 1; strlcpy(pcr.rule.label, desc, PF_RULE_LABEL_SIZE); if(queue) strlcpy(pcr.rule.qname, queue, PF_QNAME_SIZE); if(tag) strlcpy(pcr.rule.tagname, tag, PF_TAG_NAME_SIZE); if(rhost && rhost[0] != '\0' && rhost[0] != '*') { inet_pton(AF_INET, rhost, &pcr.rule.src.addr.v.a.addr.v4.s_addr); pcr.rule.src.addr.v.a.mask.v4.s_addr = htonl(INADDR_NONE); } /* we want any - iaddr port = # keep state label */ inet_pton(AF_INET, iaddr, &pcr.rule.dst.addr.v.a.addr.v4.s_addr); pcr.rule.dst.addr.v.a.mask.v4.s_addr = htonl(INADDR_NONE); #ifndef PF_NEWSTYLE pcr.rule.rpool.proxy_port[0] = iport; pcr.rule.rpool.proxy_port[1] = iport; TAILQ_INIT(&pcr.rule.rpool.list); #endif if(1) { pcr.action = PF_CHANGE_GET_TICKET; if(ioctl(dev, DIOCCHANGERULE, &pcr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCCHANGERULE, ...) PF_CHANGE_GET_TICKET: %m"); r = -1; } else { pcr.action = PF_CHANGE_ADD_TAIL; if(ioctl(dev, DIOCCHANGERULE, &pcr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCCHANGERULE, ...) PF_CHANGE_ADD_TAIL: %m"); r = -1; } } } }
return r; #endif } /* get_redirect_rule() * return value : 0 success (found) * -1 = error or rule not found */ int get_redirect_rule(const char * ifname, unsigned short eport, int proto, char * iaddr, int iaddrlen, unsigned short * iport, char * desc, int desclen, char * rhost, int rhostlen, unsigned int * timestamp, u_int64_t * packets, u_int64_t * bytes) { int i, n; struct pfioc_rule pr; #ifndef PF_NEWSTYLE struct pfioc_pooladdr pp; #endif UNUSED(ifname); if(dev<0) { syslog(LOG_ERR, "pf device is not open"); return -1; } memset(&pr, 0, sizeof(pr)); strlcpy(pr.anchor, anchor_name, MAXPATHLEN); #ifndef PF_NEWSTYLE pr.rule.action = PF_RDR; #endif if(ioctl(dev, DIOCGETRULES, &pr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETRULES, ...): %m"); goto error; } n = pr.nr; for(i=0; i<n; i++) { pr.nr = i; if(ioctl(dev, DIOCGETRULE, &pr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETRULE): %m"); goto error; } #ifdef __APPLE__ if( (eport == ntohs(pr.rule.dst.xport.range.port[0])) && (eport == ntohs(pr.rule.dst.xport.range.port[1])) #else if( (eport == ntohs(pr.rule.dst.port[0])) && (eport == ntohs(pr.rule.dst.port[1])) #endif && (pr.rule.proto == proto) ) { #ifndef PF_NEWSTYLE *iport = pr.rule.rpool.proxy_port[0]; #else *iport = pr.rule.rdr.proxy_port[0]; #endif if(desc) strlcpy(desc, pr.rule.label, desclen); #ifdef PFRULE_INOUT_COUNTS if(packets) *packets = pr.rule.packets[0] + pr.rule.packets[1]; if(bytes) *bytes = pr.rule.bytes[0] + pr.rule.bytes[1]; #else if(packets) *packets = pr.rule.packets; if(bytes) *bytes = pr.rule.bytes; #endif #ifndef PF_NEWSTYLE memset(&pp, 0, sizeof(pp)); strlcpy(pp.anchor, anchor_name, MAXPATHLEN); pp.r_action = PF_RDR; pp.r_num = i; pp.ticket = pr.ticket; if(ioctl(dev, DIOCGETADDRS, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETADDRS, ...): %m"); goto error; } if(pp.nr != 1) { syslog(LOG_NOTICE, "No address associated with pf rule"); goto error; } pp.nr = 0; /* first */ if(ioctl(dev, DIOCGETADDR, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETADDR, ...): %m"); goto error; } inet_ntop(AF_INET, &pp.addr.addr.v.a.addr.v4.s_addr, iaddr, iaddrlen); #else inet_ntop(AF_INET, &pr.rule.rdr.addr.v.a.addr.v4.s_addr, iaddr, iaddrlen); #endif if(rhost && rhostlen > 0) { if (pr.rule.src.addr.v.a.addr.v4.s_addr == 0) { rhost[0] = '\0'; /* empty string */ } else { inet_ntop(AF_INET, &pr.rule.src.addr.v.a.addr.v4.s_addr, rhost, rhostlen); } } if(timestamp) *timestamp = get_timestamp(eport, proto); return 0; } } error: return -1; } static int priv_delete_redirect_rule(const char * ifname, unsigned short eport, int proto, unsigned short * iport, in_addr_t * iaddr) { int i, n; struct pfioc_rule pr; UNUSED(ifname); if(dev<0) { syslog(LOG_ERR, "pf device is not open"); return -1; } memset(&pr, 0, sizeof(pr)); strlcpy(pr.anchor, anchor_name, MAXPATHLEN); #ifndef PF_NEWSTYLE pr.rule.action = PF_RDR; #endif if(ioctl(dev, DIOCGETRULES, &pr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETRULES, ...): %m"); goto error; } n = pr.nr; for(i=0; i<n; i++) { pr.nr = i; if(ioctl(dev, DIOCGETRULE, &pr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETRULE): %m"); goto error; } #ifdef __APPLE__ if( (eport == ntohs(pr.rule.dst.xport.range.port[0])) && (eport == ntohs(pr.rule.dst.xport.range.port[1])) #else if( (eport == ntohs(pr.rule.dst.port[0])) && (eport == ntohs(pr.rule.dst.port[1])) #endif && (pr.rule.proto == proto) ) { /* retrieve iport in order to remove filter rule */ #ifndef PF_NEWSTYLE if(iport) *iport = pr.rule.rpool.proxy_port[0]; if(iaddr) { /* retrieve internal 
address */ struct pfioc_pooladdr pp; memset(&pp, 0, sizeof(pp)); strlcpy(pp.anchor, anchor_name, MAXPATHLEN); pp.r_action = PF_RDR; pp.r_num = i; pp.ticket = pr.ticket; if(ioctl(dev, DIOCGETADDRS, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETADDRS, ...): %m"); goto error; } if(pp.nr != 1) { syslog(LOG_NOTICE, "No address associated with pf rule"); goto error; } pp.nr = 0; /* first */ if(ioctl(dev, DIOCGETADDR, &pp) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCGETADDR, ...): %m"); goto error; } *iaddr = pp.addr.addr.v.a.addr.v4.s_addr; } #else if(iport) *iport = pr.rule.rdr.proxy_port[0]; if(iaddr) { /* retrieve internal address */ *iaddr = pr.rule.rdr.addr.v.a.addr.v4.s_addr; } #endif pr.action = PF_CHANGE_GET_TICKET; if(ioctl(dev, DIOCCHANGERULE, &pr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCCHANGERULE, ...) PF_CHANGE_GET_TICKET: %m"); goto error; } pr.action = PF_CHANGE_REMOVE; pr.nr = i; if(ioctl(dev, DIOCCHANGERULE, &pr) < 0) { syslog(LOG_ERR, "ioctl(dev, DIOCCHANGERULE, ...) PF_CHANGE_REMOVE: %m"); goto error; } remove_timestamp_entry(eport, proto); return 0; } } error: return -1; }
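/*
 * add_redirect_rule2() records each mapping's expiry by prepending a
 * timestamp_entry onto a hand-rolled singly linked list.  A minimal,
 * self-contained version of that push (struct entry and list_head are
 * illustrative, not miniupnpd's declarations):
 */
#include <stdlib.h>

struct entry {
	struct entry *next;
	unsigned int timestamp;
	unsigned short eport;
};

static struct entry *list_head;

static int
push_entry(unsigned int timestamp, unsigned short eport)
{
	struct entry *e = malloc(sizeof(*e));

	if (e == NULL)
		return (-1);
	e->timestamp = timestamp;
	e->eport = eport;
	e->next = list_head;	/* new element points at the old head */
	list_head = e;
	return (0);
}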
int main(int argc, const char **argv) { struct partition_metadata *md; const char *prompt; struct partedit_item *items = NULL; struct gmesh mesh; int i, op, nitems, nscroll; int error; TAILQ_INIT(&part_metadata); init_fstab_metadata(); init_dialog(stdin, stdout); if (strcmp(basename(argv[0]), "sade") != 0) dialog_vars.backtitle = __DECONST(char *, "FreeBSD Installer"); dialog_vars.item_help = TRUE; nscroll = i = 0; /* Revert changes on SIGINT */ signal(SIGINT, sigint_handler); if (strcmp(basename(argv[0]), "autopart") == 0) { /* Guided */ prompt = "Please review the disk setup. When complete, press " "the Finish button."; part_wizard(); } else { prompt = "Create partitions for FreeBSD. No changes will be " "made until you select Finish."; } /* Show the part editor either immediately, or to confirm wizard */ while (1) { dlg_clear(); dlg_put_backtitle(); error = geom_gettree(&mesh); if (error == 0) items = read_geom_mesh(&mesh, &nitems); if (error || items == NULL) { dialog_msgbox("Error", "No disks found. If you need to " "install a kernel driver, choose Shell at the " "installation menu.", 0, 0, TRUE); break; } get_mount_points(items, nitems); if (i >= nitems) i = nitems - 1; op = diskeditor_show("Partition Editor", prompt, items, nitems, &i, &nscroll); switch (op) { case 0: /* Create */ gpart_create((struct gprovider *)(items[i].cookie), NULL, NULL, NULL, NULL, 1); break; case 1: /* Delete */ gpart_delete((struct gprovider *)(items[i].cookie)); break; case 2: /* Modify */ gpart_edit((struct gprovider *)(items[i].cookie)); break; case 3: /* Revert */ gpart_revert_all(&mesh); while ((md = TAILQ_FIRST(&part_metadata)) != NULL) { if (md->fstab != NULL) { free(md->fstab->fs_spec); free(md->fstab->fs_file); free(md->fstab->fs_vfstype); free(md->fstab->fs_mntops); free(md->fstab->fs_type); free(md->fstab); } if (md->newfs != NULL) free(md->newfs); free(md->name); TAILQ_REMOVE(&part_metadata, md, metadata); free(md); } init_fstab_metadata(); break; case 4: /* Auto */ part_wizard(); break; } error = 0; if (op == 5) { /* Finished */ dialog_vars.ok_label = __DECONST(char *, "Commit"); dialog_vars.extra_label = __DECONST(char *, "Revert & Exit"); dialog_vars.extra_button = TRUE; dialog_vars.cancel_label = __DECONST(char *, "Back"); op = dialog_yesno("Confirmation", "Your changes will " "now be written to disk. If you have chosen to " "overwrite existing data, it will be PERMANENTLY " "ERASED. Are you sure you want to commit your " "changes?", 0, 0); dialog_vars.ok_label = NULL; dialog_vars.extra_button = FALSE; dialog_vars.cancel_label = NULL; if (op == 0 && validate_setup()) { /* Save */ error = apply_changes(&mesh); break; } else if (op == 3) { /* Quit */ gpart_revert_all(&mesh); error = -1; break; } } geom_deletetree(&mesh); free(items); } end_dialog(); return (error); }
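/*
 * The Revert case above drains part_metadata with the standard TAILQ
 * pattern: take the first element, remove it, free owned members, then free
 * the node itself.  Sketch with a hypothetical struct node:
 */
#include <sys/queue.h>
#include <stdlib.h>

struct node {
	TAILQ_ENTRY(node) link;
	char *name;
};
TAILQ_HEAD(node_head, node);

static void
drain(struct node_head *head)
{
	struct node *n;

	while ((n = TAILQ_FIRST(head)) != NULL) {
		TAILQ_REMOVE(head, n, link);
		free(n->name);		/* owned members first */
		free(n);
	}
}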
int main(int argc, char **argv) { char *CheckPkg = NULL; char *BestCheckPkg = NULL; lpkg_t *lpp; int ch; int rc; setprogname(argv[0]); while ((ch = getopt(argc, argv, Options)) != -1) switch (ch) { case '.': /* for backward compatibility */ break; case 'a': Which = WHICH_ALL; break; case 'B': Flags |= SHOW_BUILD_INFO; break; case 'b': Flags |= SHOW_BUILD_VERSION; break; case 'c': Flags |= SHOW_COMMENT; break; case 'D': Flags |= SHOW_DISPLAY; break; case 'd': Flags |= SHOW_DESC; break; case 'E': BestCheckPkg = optarg; break; case 'e': CheckPkg = optarg; break; case 'f': Flags |= SHOW_PLIST; break; case 'F': File2Pkg = 1; break; case 'I': Flags |= SHOW_INDEX; break; case 'i': Flags |= SHOW_INSTALL; break; case 'K': pkgdb_set_dir(optarg, 3); break; case 'k': Flags |= SHOW_DEINSTALL; break; case 'L': Flags |= SHOW_FILES; break; case 'l': InfoPrefix = optarg; break; case 'm': Flags |= SHOW_MTREE; break; case 'N': Flags |= SHOW_BLD_DEPENDS; break; case 'n': Flags |= SHOW_DEPENDS; break; case 'p': Flags |= SHOW_PREFIX; break; case 'Q': Flags |= SHOW_BI_VAR; BuildInfoVariable = optarg; break; case 'q': Quiet = TRUE; break; case 'r': Flags |= SHOW_FULL_REQBY; break; case 'R': Flags |= SHOW_REQBY; break; case 's': Flags |= SHOW_PKG_SIZE; break; case 'S': Flags |= SHOW_ALL_SIZE; break; case 'u': Which = WHICH_USER; break; case 'v': Verbose = TRUE; /* Reasonable definition of 'everything' */ Flags = SHOW_COMMENT | SHOW_DESC | SHOW_PLIST | SHOW_INSTALL | SHOW_DEINSTALL | SHOW_DISPLAY | SHOW_MTREE | SHOW_REQBY | SHOW_BLD_DEPENDS | SHOW_DEPENDS | SHOW_PKG_SIZE | SHOW_ALL_SIZE; break; case 'V': show_version(); /* NOTREACHED */ case 'X': Flags |= SHOW_SUMMARY; break; case 'h': case '?': default: usage(); /* NOTREACHED */ } argc -= optind; argv += optind; pkg_install_config(); if (argc == 0 && !Flags && !CheckPkg) { /* No argument or relevant flags specified - assume -I */ Flags = SHOW_INDEX; /* assume -a if neither -u nor -a is given */ if (Which == WHICH_LIST) Which = WHICH_ALL; } if (CheckPkg != NULL && BestCheckPkg != NULL) { warnx("-E and -e are mutually exclusive"); usage(); } if (argc != 0 && CheckPkg != NULL) { warnx("can't give any additional arguments to -e"); usage(); } if (argc != 0 && BestCheckPkg != NULL) { warnx("can't give any additional arguments to -E"); usage(); } if (argc != 0 && Which != WHICH_LIST) { warnx("can't use both -a/-u and package name"); usage(); } /* Set some reasonable defaults */ if (!Flags) Flags = SHOW_COMMENT | SHOW_DESC | SHOW_REQBY | SHOW_DEPENDS | SHOW_DISPLAY; /* -Fe /filename -> change CheckPkg to real packagename */ if (CheckPkg) { if (File2Pkg) { char *s; if (!pkgdb_open(ReadOnly)) err(EXIT_FAILURE, "cannot open pkgdb"); s = pkgdb_retrieve(CheckPkg); if (s == NULL) errx(EXIT_FAILURE, "No matching pkg for %s.", CheckPkg); CheckPkg = xstrdup(s); pkgdb_close(); } return CheckForPkg(CheckPkg); } if (BestCheckPkg) return CheckForBestPkg(BestCheckPkg); TAILQ_INIT(&pkgs); /* Get all the remaining package names, if any */ if (File2Pkg && Which == WHICH_LIST) if (!pkgdb_open(ReadOnly)) { err(EXIT_FAILURE, "cannot open pkgdb"); } while (*argv) { /* pkgdb: if -F flag given, don't add pkgnames to the "pkgs" * queue but rather resolve the given filenames to pkgnames * using pkgdb_retrieve, then add them. 
*/ if (File2Pkg) { char *s; s = pkgdb_retrieve(*argv); if (s) { lpp = alloc_lpkg(s); TAILQ_INSERT_TAIL(&pkgs, lpp, lp_link); } else errx(EXIT_FAILURE, "No matching pkg for %s.", *argv); } else { if (ispkgpattern(*argv)) { switch (add_installed_pkgs_by_pattern(*argv, &pkgs)) { case 0: errx(EXIT_FAILURE, "No matching pkg for %s.", *argv); case -1: errx(EXIT_FAILURE, "Error during search in pkgdb for %s", *argv); } } else { const char *dbdir; dbdir = pkgdb_get_dir(); if (**argv == '/' && strncmp(*argv, dbdir, strlen(dbdir)) == 0) { *argv += strlen(dbdir) + 1; if ((*argv)[strlen(*argv) - 1] == '/') { (*argv)[strlen(*argv) - 1] = 0; } } lpp = alloc_lpkg(*argv); TAILQ_INSERT_TAIL(&pkgs, lpp, lp_link); } } argv++; } if (File2Pkg) pkgdb_close(); /* If no packages, yelp */ if (TAILQ_FIRST(&pkgs) == NULL && Which == WHICH_LIST && !CheckPkg) warnx("missing package name(s)"), usage(); rc = pkg_perform(&pkgs); exit(rc); /* NOTREACHED */ }
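/*
 * main() above turns absolute pkgdb paths back into bare package names by
 * stripping the database directory prefix and any trailing slash.  The same
 * logic as a helper; strip_dbdir() is an illustrative name and, like the
 * original, it assumes a '/' follows the prefix:
 */
#include <string.h>

static char *
strip_dbdir(char *path, const char *dbdir)
{
	size_t len = strlen(dbdir);

	if (path[0] == '/' && strncmp(path, dbdir, len) == 0) {
		path += len + 1;		/* skip "<dbdir>/" */
		len = strlen(path);
		if (len > 0 && path[len - 1] == '/')
			path[len - 1] = '\0';	/* drop trailing '/' */
	}
	return (path);
}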
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller) { int error; device_t device = controller->isci->device; uint32_t max_segment_size = isci_io_request_get_max_io_size(); uint32_t status = 0; struct ISCI_MEMORY *uncached_controller_memory = &controller->uncached_controller_memory; struct ISCI_MEMORY *cached_controller_memory = &controller->cached_controller_memory; struct ISCI_MEMORY *request_memory = &controller->request_memory; POINTER_UINT virtual_address; bus_addr_t physical_address; controller->mdl = sci_controller_get_memory_descriptor_list_handle( controller->scif_controller_handle); uncached_controller_memory->size = sci_mdl_decorator_get_memory_size( controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS); error = isci_allocate_dma_buffer(device, uncached_controller_memory); if (error != 0) return (error); sci_mdl_decorator_assign_memory( controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, uncached_controller_memory->virtual_address, uncached_controller_memory->physical_address); cached_controller_memory->size = sci_mdl_decorator_get_memory_size( controller->mdl, SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS ); error = isci_allocate_dma_buffer(device, cached_controller_memory); if (error != 0) return (error); sci_mdl_decorator_assign_memory(controller->mdl, SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, cached_controller_memory->virtual_address, cached_controller_memory->physical_address); request_memory->size = controller->queue_depth * isci_io_request_get_object_size(); error = isci_allocate_dma_buffer(device, request_memory); if (error != 0) return (error); /* For STP PIO testing, we want to ensure we can force multiple SGLs * since this has been a problem area in SCIL. This tunable parameter * will allow us to force DMA segments to a smaller size, ensuring * that even if a physically contiguous buffer is attached to this * I/O, the DMA subsystem will pass us multiple segments in our DMA * load callback. */ TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size); /* Create DMA tag for our I/O requests. Then we can create DMA maps based off * of this tag and store them in each of our ISCI_IO_REQUEST objects. This * will enable better performance than creating the DMA maps every time we get * an I/O. 
*/ status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, isci_io_request_get_max_io_size(), SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL, &controller->buffer_dma_tag); sci_pool_initialize(controller->request_pool); virtual_address = request_memory->virtual_address; physical_address = request_memory->physical_address; for (int i = 0; i < controller->queue_depth; i++) { struct ISCI_REQUEST *request = (struct ISCI_REQUEST *)virtual_address; isci_request_construct(request, controller->scif_controller_handle, controller->buffer_dma_tag, physical_address); sci_pool_put(controller->request_pool, request); virtual_address += isci_request_get_object_size(); physical_address += isci_request_get_object_size(); } uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) + scif_remote_device_get_object_size(); controller->remote_device_memory = (uint8_t *) malloc( remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI, M_NOWAIT | M_ZERO); sci_pool_initialize(controller->remote_device_pool); uint8_t *remote_device_memory_ptr = controller->remote_device_memory; for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr; controller->remote_device[i] = NULL; remote_device->index = i; remote_device->is_resetting = FALSE; remote_device->frozen_lun_mask = 0; sci_fast_list_element_init(remote_device, &remote_device->pending_device_reset_element); TAILQ_INIT(&remote_device->queued_ccbs); remote_device->release_queued_ccb = FALSE; remote_device->queued_ccb_in_progress = NULL; /* * For the first SCI_MAX_DOMAINS device objects, do not put * them in the pool, rather assign them to each domain. This * ensures that any device attached directly to port "i" will * always get CAM target id "i". */ if (i < SCI_MAX_DOMAINS) controller->domain[i].da_remote_device = remote_device; else sci_pool_put(controller->remote_device_pool, remote_device); remote_device_memory_ptr += remote_device_size; } return (0); }
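/*
 * The request-pool setup above carves one large DMA-able allocation into
 * fixed-size objects by stepping a virtual and a physical cursor in lock
 * step.  A simplified sketch (carve_pool and init_object are illustrative,
 * not SCIL names):
 */
#include <stdint.h>
#include <stddef.h>

static void
carve_pool(uint8_t *virt, uint64_t phys, size_t object_size, int count,
    void (*init_object)(void *virt, uint64_t phys))
{
	int i;

	for (i = 0; i < count; i++) {
		init_object(virt, phys);	/* construct in place */
		virt += object_size;		/* advance both cursors */
		phys += object_size;
	}
}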
pid_t proc_run(struct privsep *ps, struct privsep_proc *p, struct privsep_proc *procs, u_int nproc, void (*init)(struct privsep *, void *), void *arg) { pid_t pid; struct passwd *pw; const char *root; u_int32_t seed[256]; switch (pid = fork()) { case -1: fatal("proc_run: cannot fork"); case 0: break; default: return (pid); } pw = ps->ps_pw; if (p->p_id == PROC_CONTROL) { if (control_init(ps, &ps->ps_csock) == -1) fatalx(p->p_title); } /* Change root directory */ if (p->p_chroot != NULL) root = p->p_chroot; else root = pw->pw_dir; #ifndef DEBUG if (chroot(root) == -1) fatal("proc_run: chroot"); if (chdir("/") == -1) fatal("proc_run: chdir(\"/\")"); #else #warning disabling privilege revocation and chroot in DEBUG MODE if (p->p_chroot != NULL) { if (chroot(root) == -1) fatal("proc_run: chroot"); if (chdir("/") == -1) fatal("proc_run: chdir(\"/\")"); } #endif privsep_process = p->p_id; setproctitle("%s", p->p_title); #ifndef DEBUG if (setgroups(1, &pw->pw_gid) || setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) fatal("proc_run: cannot drop privileges"); #endif event_init(); signal_set(&ps->ps_evsigint, SIGINT, proc_sig_handler, p); signal_set(&ps->ps_evsigterm, SIGTERM, proc_sig_handler, p); signal_set(&ps->ps_evsigchld, SIGCHLD, proc_sig_handler, p); signal_set(&ps->ps_evsighup, SIGHUP, proc_sig_handler, p); signal_set(&ps->ps_evsigpipe, SIGPIPE, proc_sig_handler, p); signal_add(&ps->ps_evsigint, NULL); signal_add(&ps->ps_evsigterm, NULL); signal_add(&ps->ps_evsigchld, NULL); signal_add(&ps->ps_evsighup, NULL); signal_add(&ps->ps_evsigpipe, NULL); proc_config(ps, procs, nproc); arc4random_buf(seed, sizeof(seed)); RAND_seed(seed, sizeof(seed)); if (p->p_id == PROC_CONTROL) { TAILQ_INIT(&ctl_conns); if (control_listen(&ps->ps_csock) == -1) fatalx(p->p_title); } if (init != NULL) init(ps, arg); event_dispatch(); proc_shutdown(p); return (0); }
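/*
 * proc_run() drops privileges in the only order that works: chroot() while
 * still root, then groups, then gid, then uid last, since nothing can be
 * regained once the uid is gone.  Condensed sketch with the same libc calls
 * (setresgid/setresuid as on OpenBSD; drop_privs is an illustrative name):
 */
#include <err.h>
#include <grp.h>
#include <pwd.h>
#include <unistd.h>

static void
drop_privs(const struct passwd *pw, const char *root)
{
	if (chroot(root) == -1 || chdir("/") == -1)
		err(1, "chroot %s", root);
	if (setgroups(1, &pw->pw_gid) == -1 ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) == -1 ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid) == -1)
		err(1, "cannot drop privileges");
}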
/*---------------------------------------------------------------------------*/ static int raio_handle_setup(void *prv_session_data, void *prv_portal_data, struct raio_command *cmd, char *cmd_data, struct xio_msg *req) { int fd, j; struct raio_io_session_data *sd = prv_session_data; struct raio_io_portal_data *pd = prv_portal_data; if (3*sizeof(int) != cmd->data_len) { errno = EINVAL; printf("io setup request rejected\n"); goto reject; } unpack_u32((uint32_t *)&pd->iodepth, unpack_u32((uint32_t *)&fd, cmd_data)); pd->io_u_free_nr = pd->iodepth + EXTRA_MSGS; pd->io_us_free = calloc(pd->io_u_free_nr, sizeof(struct raio_io_u)); pd->rsp_pool = msg_pool_create(512, MAXBLOCKSIZE, pd->io_u_free_nr); TAILQ_INIT(&pd->io_u_free_list); /* register each io_u in the free list */ for (j = 0; j < pd->io_u_free_nr; j++) { pd->io_us_free[j].rsp = msg_pool_get(pd->rsp_pool); TAILQ_INSERT_TAIL(&pd->io_u_free_list, &pd->io_us_free[j], io_u_list); } if (sd->is_null) pd->bs_dev = raio_bs_init(pd->ctx, "null"); else pd->bs_dev = raio_bs_init(pd->ctx, "aio"); errno = -raio_bs_open(pd->bs_dev, fd); reject: if (errno) { struct raio_answer ans = { RAIO_CMD_IO_SETUP, 0, -1, errno }; pack_u32((uint32_t *)&ans.ret_errno, pack_u32((uint32_t *)&ans.ret, pack_u32(&ans.data_len, pack_u32(&ans.command, pd->rsp_hdr)))); } else { struct raio_answer ans = { RAIO_CMD_IO_SETUP, 0, 0, 0 }; pack_u32((uint32_t *)&ans.ret_errno, pack_u32((uint32_t *)&ans.ret, pack_u32(&ans.data_len, pack_u32(&ans.command, pd->rsp_hdr)))); } pd->rsp.out.header.iov_len = sizeof(struct raio_answer); pd->rsp.request = req; xio_send_response(&pd->rsp); return 0; }
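/*
 * The reply path above serializes the raio_answer header with chained
 * pack_u32() calls: each call writes one 32-bit field and returns the
 * advanced cursor so the calls nest.  A sketch of that shape; the
 * little-endian wire order here is an assumption, not necessarily what
 * accelio's real pack_u32() uses:
 */
#include <stdint.h>

static uint8_t *
pack_u32_sketch(const uint32_t *value, uint8_t *buf)
{
	uint32_t v = *value;

	buf[0] = (uint8_t)v;		/* least significant byte first */
	buf[1] = (uint8_t)(v >> 8);
	buf[2] = (uint8_t)(v >> 16);
	buf[3] = (uint8_t)(v >> 24);
	return (buf + 4);		/* cursor for the next field */
}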
static int mutex_trylock_common(pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); int ret = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in mutex_trylock_common"); /* * Defer signals to protect the scheduling queues from * access by the signal handler: */ _thread_kern_sig_defer(); /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* * If the mutex was statically allocated, properly * initialize the tail queue. */ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { TAILQ_INIT(&(*mutex)->m_queue); _MUTEX_INIT_LINK(*mutex); (*mutex)->m_flags |= MUTEX_FLAGS_INITED; } /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority inheritance mutex: */ case PTHREAD_PRIO_INHERIT: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The mutex takes on the attributes of the * running thread when there are no waiters. */ (*mutex)->m_prio = curthread->active_priority; (*mutex)->m_saved_prio = curthread->inherited_priority; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* POSIX priority protection mutex: */ case PTHREAD_PRIO_PROTECT: /* Check for a priority ceiling violation: */ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; /* Check if this mutex is not locked: */ else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = curthread; /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; /* * The running thread inherits the ceiling * priority of the mutex and executes at that * priority. */ curthread->active_priority = (*mutex)->m_prio; (*mutex)->m_saved_prio = curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; /* Add to the list of owned mutexes: */ _MUTEX_ASSERT_NOT_OWNED(*mutex); TAILQ_INSERT_TAIL(&curthread->mutexq, (*mutex), m_qe); } else if ((*mutex)->m_owner == curthread) ret = mutex_self_trylock(*mutex); else /* Return a busy error: */ ret = EBUSY; break; /* Trap invalid mutex types: */ default: /* Return an invalid argument error: */ ret = EINVAL; break; } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); /* * Undefer and handle pending signals, yielding if * necessary: */ _thread_kern_sig_undefer(); /* Return the completion status: */ return (ret); }
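/*
 * From the caller's side the logic above surfaces as the standard
 * pthread_mutex_trylock() contract: 0 on success, EBUSY when the lock is
 * held elsewhere.  Minimal usage sketch:
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void
try_work(void)
{
	int ret = pthread_mutex_trylock(&lock);

	if (ret == 0) {
		/* ... critical section ... */
		pthread_mutex_unlock(&lock);
	} else if (ret == EBUSY) {
		/* contended: do something else rather than block */
	} else {
		fprintf(stderr, "trylock: %d\n", ret);	/* e.g. EINVAL */
	}
}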
void sys$bootstrap(struct vms$meminfo *mem_info, vms$pointer pagesize) { struct memsection *heap; unsigned int i; vms$pointer base; vms$pointer end; notice(SYSBOOT_I_SYSBOOT "reserving memory for preloaded objects\n"); /* Initialization */ pm_alloc.internal.base = 0; pm_alloc.internal.end = 0; pm_alloc.internal.active = 0; vm_alloc.internal.base = 0; vm_alloc.internal.end = 0; vm_alloc.internal.active = 0; for(i = 0; i <= MAX_FPAGE_ORDER; i++) { TAILQ_INIT(&vm_alloc.flist[i]); TAILQ_INIT(&pm_alloc.flist[i]); } /* Bootimage objects are removed from free virtual memory. */ for(i = 0; i < mem_info->num_objects; i++) { if (mem_info->objects[i].flags & VMS$IOF_VIRT) { notice(MEM_I_ALLOC "allocating $%016lX - $%016lX\n", mem_info->objects[i].base, mem_info->objects[i].end); sys$remove_virtmem(mem_info, mem_info->objects[i].base, mem_info->objects[i].end, pagesize); } } /* Free up some virtual memory to bootstrap the fpage allocator. */ for(i = 0; i < mem_info->num_vm_regions; i++) { base = sys$page_round_up(mem_info->vm_regions[i].base, pagesize); end = sys$page_round_down(mem_info->vm_regions[i].end + 1, pagesize) - 1; if (((end - base) + 1) >= (2 * pagesize)) { notice(MEM_I_FALLOC "bootstrapping Fpage allocator at virtual " "addresses\n"); notice(MEM_I_FALLOC "$%016lX - $%016lX\n", base, end); sys$fpage_free_internal(&vm_alloc, base, end); mem_info->vm_regions[i].end = mem_info->vm_regions[i].base; break; } } PANIC(i >= mem_info->num_vm_regions); /* We need to make sure the first chunk of physical memory we free is at least 2 * pagesize to bootstrap the slab allocators for memsections and the fpage lists. */ for(i = 0; i < mem_info->num_regions; i++) { base = sys$page_round_up(mem_info->regions[i].base, pagesize); end = sys$page_round_down(mem_info->regions[i].end + 1, pagesize) - 1; if (((end - base) + 1) >= (2 * pagesize)) { notice(MEM_I_SALLOC "bootstrapping Slab allocator at physical " "addresses\n"); notice(MEM_I_SALLOC "$%016lX - $%016lX\n", base, end); sys$fpage_free_chunk(&pm_alloc, base, end); mem_info->regions[i].end = mem_info->regions[i].base; break; } } PANIC(i >= mem_info->num_regions); /* Base and end may not be aligned, but we need them to be aligned. If the area is smaller than a page, we should not add it to the free list. */ for(i = 0; i < mem_info->num_regions; i++) { if (mem_info->regions[i].base == mem_info->regions[i].end) { continue; } base = sys$page_round_up(mem_info->regions[i].base, pagesize); end = sys$page_round_down(mem_info->regions[i].end + 1, pagesize) - 1; if (base < end) { notice(MEM_I_FREE "freeing region $%016lX - $%016lX\n", base, end); sys$fpage_free_chunk(&pm_alloc, base, end); } } sys$fpage_clear_internal(&vm_alloc); /* Initialize VM allocator */ for(i = 0; i < mem_info->num_vm_regions; i++) { if (mem_info->vm_regions[i].base < mem_info->vm_regions[i].end) { notice(MEM_I_VALLOC "adding $%016lX - $%016lX to VM allocator\n", mem_info->vm_regions[i].base, mem_info->vm_regions[i].end); sys$fpage_free_chunk(&vm_alloc, mem_info->vm_regions[i].base, mem_info->vm_regions[i].end); } } /* Setup the kernel heap */ heap = sys$pd_create_memsection((struct pd *) NULL, VMS$HEAP_SIZE, 0, VMS$MEM_NORMAL | VMS$MEM_USER, pagesize); PANIC(heap == NULL, notice(SYS_F_HEAP "cannot allocate kernel heap\n")); sys$alloc_init(heap->base, heap->end); return; }
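/*
 * The bootstrap above rounds every region inward to page boundaries before
 * freeing it.  For a power-of-two page size the rounding reduces to mask
 * arithmetic; this sketch mirrors the sys$page_round_up/_down call shapes,
 * but the names and the power-of-two assumption are illustrative, not
 * FreeVMS's actual definitions.
 */
#include <stdint.h>

static uint64_t
page_round_down(uint64_t addr, uint64_t pagesize)
{
	return (addr & ~(pagesize - 1));		/* clear offset bits */
}

static uint64_t
page_round_up(uint64_t addr, uint64_t pagesize)
{
	return ((addr + pagesize - 1) & ~(pagesize - 1));
}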