/*
 * Combine two argument path names by putting the second argument
 * before the first in the first's buffer.  This isn't very general;
 * it is designed specifically for symbolic link processing.
 * This function copies the symlink in-place in the pathname.  This is to
 * ensure that vnode path caching remains correct.  At the point where this
 * is called (from lookuppnvp), we have called pn_getcomponent(), found it
 * is a symlink, and are now replacing the contents.  The complen parameter
 * indicates how much of the pathname to replace.  If the symlink is an
 * absolute path, then we overwrite the entire contents of the pathname.
 *
 * Returns 0 on success, or ENAMETOOLONG if the combined result would not
 * fit in pnp's buffer.
 */
int
pn_insert(struct pathname *pnp, struct pathname *sympnp, size_t complen)
{

	if (*sympnp->pn_path == '/') {
		/*
		 * Full path, replace everything
		 */
		if (pnp->pn_pathlen + sympnp->pn_pathlen >= pnp->pn_bufsize)
			return (ENAMETOOLONG);
		if (pnp->pn_pathlen != 0)
			/*
			 * Slide the unconsumed remainder of the path up to
			 * make room for the symlink contents; source and
			 * destination may overlap, hence ovbcopy.
			 */
			ovbcopy(pnp->pn_path, pnp->pn_buf + sympnp->pn_pathlen,
			    pnp->pn_pathlen);
		/* Symlink text goes at the front of the buffer. */
		bcopy(sympnp->pn_path, pnp->pn_buf, sympnp->pn_pathlen);
		pnp->pn_pathlen += sympnp->pn_pathlen;
		pnp->pn_buf[pnp->pn_pathlen] = '\0';
		pnp->pn_path = pnp->pn_buf;
	} else {
		/*
		 * Partial path, replace only last component
		 */
		if ((pnp->pn_path - pnp->pn_buf) - complen +
		    pnp->pn_pathlen + sympnp->pn_pathlen >= pnp->pn_bufsize)
			return (ENAMETOOLONG);
		if (pnp->pn_pathlen != 0)
			/*
			 * Move the remaining path (including its NUL, the
			 * "+ 1") so it ends up right after where the
			 * symlink contents will land.  Overlap possible.
			 * NOTE(review): assumes pn_path points just past
			 * the consumed component per pn_getcomponent() —
			 * confirm against caller (lookuppnvp).
			 */
			ovbcopy(pnp->pn_path, pnp->pn_path - complen +
			    sympnp->pn_pathlen, pnp->pn_pathlen + 1);
		/* Back up over the consumed component and write symlink. */
		pnp->pn_path -= complen;
		bcopy(sympnp->pn_path, pnp->pn_path, sympnp->pn_pathlen);
		pnp->pn_pathlen += sympnp->pn_pathlen;
	}

	return (0);
}
/* * Convert NetWare filename to Unix with optional conversions */ void ncp_path2unix(char *src, char *dst, int len, struct ncp_nlstables *nt) { int donls; u_char c, *tbl; /* char *d = dst, *s = src;*/ /* printf("toux(%02x): %s:",nt->opt, s);*/ if (nt == NULL) { ovbcopy(src, dst, len); return; } donls = (nt->opt & NWHP_NLS); if ((nt->opt & (NWHP_UPPER | NWHP_LOWER)) == 0) { while (len--) { c = *src; *dst = donls ? nt->n2u[c] : c; dst++; src++; } return; } tbl = (nt->opt & NWHP_LOWER) ? nt->to_lower : nt->to_upper; while (len--) { c = *src; *dst = tbl[donls ? nt->n2u[c] : c]; dst++; src++; } /* printf("%s\n", d);*/ }
/*
 * Post-process a received WEP frame: strip the leading IV + key ID and
 * account for the trailing CRC, fixing up the 802.11 MAC header in place.
 * On return, *len has been reduced by IV+KID+CRC and the mbuf has been
 * trimmed so it starts at the (moved) MAC header.
 */
void
acx100_proc_wep_rxbuf(struct acx_softc *sc, struct mbuf *m, int *len)
{
	int mac_hdrlen;
	struct ieee80211_frame *f;

	/*
	 * Strip leading IV and KID, and trailing CRC
	 */

	f = mtod(m, struct ieee80211_frame *);

	/* 4-address (DS-to-DS) frames carry a longer MAC header. */
	if ((f->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		mac_hdrlen = sizeof(struct ieee80211_frame_addr4);
	else
		mac_hdrlen = sizeof(struct ieee80211_frame);

#define	IEEEWEP_IVLEN	(IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN)
#define	IEEEWEP_EXLEN	(IEEEWEP_IVLEN + IEEE80211_WEP_CRCLEN)

	/* Logical length loses the IV/KID up front and the CRC at the end. */
	*len = *len - IEEEWEP_EXLEN;

	/*
	 * Move MAC header toward frame body; overlap is possible, so use
	 * ovbcopy.  The IV/KID gap that opens at the front is then trimmed.
	 */
	ovbcopy(f, (uint8_t *)f + IEEEWEP_IVLEN, mac_hdrlen);
	m_adj(m, IEEEWEP_IVLEN);

#undef IEEEWEP_EXLEN
#undef IEEEWEP_IVLEN
}
void cfxga_eraserows(void *cookie, int row, int num, long attr) { struct rasops_info *ri = cookie; struct cfxga_screen *scr = ri->ri_hw; int fg, bg; int x, y, cx, cy; /* Erase rows in backing store. */ for (x = 0; x < ri->ri_cols; x++) { scr->scr_mem[row * ri->ri_cols + x].uc = 0; scr->scr_mem[row * ri->ri_cols + x].attr = attr; } for (y = 1; y < num; y++) ovbcopy(scr->scr_mem + row * ri->ri_cols, scr->scr_mem + (row + y) * ri->ri_cols, ri->ri_cols * sizeof(struct wsdisplay_charcell)); if (scr != scr->scr_sc->sc_active) return; ri->ri_ops.unpack_attr(cookie, attr, &fg, &bg, NULL); x = ri->ri_xorigin; y = row * ri->ri_font->fontheight + ri->ri_yorigin; cx = ri->ri_emuwidth; cy = num * ri->ri_font->fontheight; cfxga_solid_fill(scr, x, y, cx, cy, ri->ri_devcmap[bg]); }
/*
 * STREAMS read-side service routine for the log driver: drain queued log
 * messages downstream, optionally stripping the "[ID ...] " message-id
 * prefix when message ids are disabled (log_msgid == 0).
 */
static int
log_rsrv(queue_t *q)
{
	mblk_t *mp;
	char *msg, *msgid_start, *msgid_end;
	size_t idlen;

	while (canputnext(q) && (mp = getq(q)) != NULL) {
		if (log_msgid == 0) {
			/*
			 * Strip out the message ID. If it's a kernel
			 * SL_CONSOLE message, replace msgid with "unix: ".
			 */
			msg = (char *)mp->b_cont->b_rptr;
			if ((msgid_start = strstr(msg, "[ID ")) != NULL &&
			    (msgid_end = strstr(msgid_start, "] ")) != NULL) {
				log_ctl_t *lc = (log_ctl_t *)mp->b_rptr;
				if ((lc->flags & SL_CONSOLE) &&
				    (lc->pri & LOG_FACMASK) == LOG_KERN)
					/*
					 * Overwrite the first 6 bytes with
					 * "unix: "; snprintf returns 6, so
					 * msgid_start now points past it.
					 */
					msgid_start = msg +
					    snprintf(msg, 7, "unix: ");
				/*
				 * Bytes to discard: from msgid_start up to
				 * and including "] ".  Slide the leading
				 * text forward over the id (overlapping
				 * copy) and advance b_rptr past it.
				 */
				idlen = msgid_end + 2 - msgid_start;
				ovbcopy(msg, msg + idlen, msgid_start - msg);
				mp->b_cont->b_rptr += idlen;
			}
		}
		mp->b_band = 0;
		putnext(q, mp);
	}
	return (0);
}
/* * Convert name from Unix to NetWare representation. * XXX: it should be complementary with path2unix, but for now * leave it as is. */ void ncp_pathcopy(const char *src, char *dst, int len, struct ncp_nlstables *nt) { int donls; u_char c; /* char *d = dst, *s = src;*/ if (nt == NULL) { ovbcopy(src, dst, len); return; } donls = (nt->opt & NWHP_NLS); if ((nt->opt & (NWHP_UPPER | NWHP_LOWER)) == 0) { while (len--) { *dst = donls ? nt->u2n[(u_char)*src] : *src; dst++; src++; } } else if (nt->opt & NWHP_DOS) { while (len--) { c = nt->to_upper[(u_char)*src]; *dst = donls ? nt->u2n[c] : c; dst++; src++; } return; } else { /* probably incorrect... */ while (len--) { *dst = donls ? nt->u2n[(u_char)*src] : *src; dst++; src++; } } /* printf("fromux: %s:%s\n", s, d);*/ }
/*
 * memmove: copy n bytes from s2 to s1, handling overlapping regions.
 * Delegates to bcopy in the boot environment and to the overlap-safe
 * ovbcopy in the kernel proper; returns s1 per the C standard contract.
 * NOTE(review): assumes bcopy is overlap-safe in the _BOOT environment —
 * confirm against the boot library's bcopy implementation.
 */
void *
memmove(void *s1, const void *s2, size_t n)
{
#if defined(_BOOT)
	bcopy(s2, s1, n);
#else
	ovbcopy(s2, s1, n);
#endif
	return (s1);
}
/*
 * Add privacy headers appropriate for the specified key.
 *
 * Prepends room for the WEP IV + KeyID after the 802.11 header by growing
 * the mbuf at the front and sliding the MAC header down.  Returns 0 if the
 * mbuf prepend fails.
 * (NOTE: this chunk is truncated here; the remainder of wep_encap — IV
 * write-back and trailer handling — lies outside this view.)
 */
static int
wep_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
{
	struct wep_ctx *ctx = k->wk_private;
	struct ieee80211com *ic = ctx->wc_ic;
	uint32_t iv;
	uint8_t *ivp;
	int hdrlen;

	hdrlen = ieee80211_hdrspace(ic, mtod(m, void *));

	/*
	 * Copy down 802.11 header and add the IV + KeyID.
	 */
	M_PREPEND(m, wep.ic_header, MB_DONTWAIT);
	if (m == NULL)
		return 0;
	ivp = mtod(m, uint8_t *);
	/* Header and its destination overlap: ovbcopy, not bcopy. */
	ovbcopy(ivp + wep.ic_header, ivp, hdrlen);
	ivp += hdrlen;

	/*
	 * XXX
	 * IV must not duplicate during the lifetime of the key.
	 * But no mechanism to renew keys is defined in IEEE 802.11
	 * for WEP. And the IV may be duplicated at other stations
	 * because the session key itself is shared. So we use a
	 * pseudo random IV for now, though it is not the right way.
	 *
	 * NB: Rather than use a strictly random IV we select a
	 * random one to start and then increment the value for
	 * each frame. This is an explicit tradeoff between
	 * overhead and security. Given the basic insecurity of
	 * WEP this seems worthwhile.
	 */

	/*
	 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
	 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
	 */
	iv = ctx->wc_iv;
	if ((iv & 0xff00) == 0xff00) {
		int B = (iv & 0xff0000) >> 16;
		if (3 <= B && B < 16)
			iv += 0x0100;
	}
/*
 * Add a slash to the end of the pathname, if it will fit.
 * Return ENAMETOOLONG if it won't.
 */
int
pn_addslash(struct pathname *pnp)
{
	char *tail = pnp->pn_path + pnp->pn_pathlen;

	if (tail + 1 >= pnp->pn_buf + pnp->pn_bufsize) {
		/* No room at the tail; is there room in the buffer at all? */
		if (pnp->pn_pathlen + 1 >= pnp->pn_bufsize)
			return (ENAMETOOLONG);
		/*
		 * Slide the component to the start of the buffer (regions
		 * may overlap) so the trailing slash fits.
		 */
		ovbcopy(pnp->pn_path, pnp->pn_buf, pnp->pn_pathlen);
		pnp->pn_path = pnp->pn_buf;
	}
	pnp->pn_path[pnp->pn_pathlen] = '/';
	pnp->pn_path[pnp->pn_pathlen + 1] = '\0';
	pnp->pn_pathlen++;
	return (0);
}
/*
 * Read one line of debugger input into lstart (capacity lsize), with
 * simple line-history support kept in the global db_lhistory buffer.
 * Returns the number of bytes in the line including the terminator slot.
 */
int
db_readline(char *lstart, int lsize)
{

	if (lsize != db_lhistlsize) {
		/*
		 * (Re)initialize input line history.  Throw away any
		 * existing history.
		 */
		db_lhist_nlines = sizeof(db_lhistory) / lsize;
		db_lhistlsize = lsize;
		db_lhistidx = -1;
	}
	db_lhistcur = db_lhistidx;

	db_force_whitespace();	/* synch output position */

	/* Set up the global line-editing cursor/extent pointers. */
	db_lbuf_start = lstart;
	db_lbuf_end = lstart + lsize;
	db_lc = lstart;
	db_le = lstart;

	/* Collect characters until db_inputchar signals end of line. */
	while (!db_inputchar(cngetc()))
		continue;

	db_printf("\n");	/* synch output position */
	*db_le = 0;

	if (db_le - db_lbuf_start > 1) {
		/* Maintain input line history for non-empty lines. */
		if (++db_lhistidx == db_lhist_nlines) {
			/*
			 * Rotate history: drop the oldest slot by sliding
			 * the rest down one entry (overlapping copy).
			 */
			ovbcopy(db_lhistory + db_lhistlsize, db_lhistory,
			    db_lhistlsize * (db_lhist_nlines - 1));
			db_lhistidx--;
		}
		bcopy(lstart, db_lhistory + db_lhistidx * db_lhistlsize,
		    db_lhistlsize);
	}

	return (db_le - db_lbuf_start);
}
void cfxga_copyrows(void *cookie, int src, int dst, int num) { struct rasops_info *ri = cookie; struct cfxga_screen *scr = ri->ri_hw; int x, sy, dy, cx, cy; /* Copy rows in backing store. */ ovbcopy(scr->scr_mem + src * ri->ri_cols, scr->scr_mem + dst * ri->ri_cols, num * ri->ri_cols * sizeof(struct wsdisplay_charcell)); if (scr != scr->scr_sc->sc_active) return; x = ri->ri_xorigin; sy = src * ri->ri_font->fontheight + ri->ri_yorigin; dy = dst * ri->ri_font->fontheight + ri->ri_yorigin; cx = ri->ri_emuwidth; cy = num * ri->ri_font->fontheight; cfxga_standalone_rop(scr, ROP_SRC, x, sy, x, dy, cx, cy); }
void cfxga_copycols(void *cookie, int row, int src, int dst, int num) { struct rasops_info *ri = cookie; struct cfxga_screen *scr = ri->ri_hw; int sx, dx, y, cx, cy; /* Copy columns in backing store. */ ovbcopy(scr->scr_mem + row * ri->ri_cols + src, scr->scr_mem + row * ri->ri_cols + dst, num * sizeof(struct wsdisplay_charcell)); if (scr != scr->scr_sc->sc_active) return; sx = src * ri->ri_font->fontwidth + ri->ri_xorigin; dx = dst * ri->ri_font->fontwidth + ri->ri_xorigin; y = row * ri->ri_font->fontheight + ri->ri_yorigin; cx = num * ri->ri_font->fontwidth; cy = ri->ri_font->fontheight; cfxga_standalone_rop(scr, ROP_SRC, sx, y, dx, y, cx, cy); }
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 *
 * flavor selects which register set to return in tstate; *count is the
 * caller-supplied capacity (in natural_t units) on entry and the actual
 * size on return.  Returns KERN_INVALID_ARGUMENT for unknown flavors or
 * undersized buffers.
 */
kern_return_t
machine_thread_get_state(thread_t thr_act, thread_flavor_t flavor,
    thread_state_t tstate, mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		{
			if (*count < 3)
				return (KERN_INVALID_ARGUMENT);

			/* Enumerate the flavors this machine supports. */
			tstate[0] = ARM_THREAD_STATE;
			tstate[1] = ARM_VFP_STATE;
			tstate[2] = ARM_EXCEPTION_STATE;
			*count = 3;
			break;
		}
	case THREAD_STATE_FLAVOR_LIST_NEW:
		{
			if (*count < 4)
				return (KERN_INVALID_ARGUMENT);

			tstate[0] = ARM_THREAD_STATE;
			tstate[1] = ARM_VFP_STATE;
			tstate[2] = ARM_EXCEPTION_STATE;
			tstate[3] = ARM_DEBUG_STATE;
			*count = 4;
			break;
		}
	case ARM_THREAD_STATE:
		{
			struct arm_thread_state *state;
			struct arm_thread_state *saved_state;

			if (*count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_thread_state *) tstate;
			saved_state = (struct arm_thread_state *)
			    thr_act->machine.uss;

			/*
			 * First, copy everything.  Use a struct assignment
			 * (same semantics as the former ovbcopy of
			 * sizeof(struct arm_thread_state) bytes) for
			 * consistency with the ARM_VFP_STATE case below.
			 */
			*state = *saved_state;
			*count = ARM_THREAD_STATE_COUNT;
			break;
		}
	case ARM_VFP_STATE:
		{
			struct arm_vfp_state *state;
			struct arm_vfp_state *saved_state;

			if (*count < ARM_VFP_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_vfp_state *) tstate;
			saved_state = (struct arm_vfp_state *)
			    &thr_act->machine.vfp_regs;

			/*
			 * First, copy everything:
			 */
			*state = *saved_state;
			*count = ARM_VFP_STATE_COUNT;
			break;
		}
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
/* * Make space for a new header of length hlen at skip bytes * into the packet. When doing this we allocate new mbufs only * when absolutely necessary. The mbuf where the new header * is to go is returned together with an offset into the mbuf. * If NULL is returned then the mbuf chain may have been modified; * the caller is assumed to always free the chain. */ struct mbuf * m_makespace(struct mbuf *m0, int skip, int hlen, int *off) { struct mbuf *m; unsigned remain; KASSERT(m0 != NULL, ("m_dmakespace: null mbuf")); KASSERT(hlen < MHLEN, ("m_makespace: hlen too big: %u", hlen)); for (m = m0; m && skip > m->m_len; m = m->m_next) skip -= m->m_len; if (m == NULL) return (NULL); /* * At this point skip is the offset into the mbuf m * where the new header should be placed. Figure out * if there's space to insert the new header. If so, * and copying the remainder makese sense then do so. * Otherwise insert a new mbuf in the chain, splitting * the contents of m as needed. */ remain = m->m_len - skip; /* data to move */ if (hlen > M_TRAILINGSPACE(m)) { struct mbuf *n; /* XXX code doesn't handle clusters XXX */ KASSERT(remain < MLEN, ("m_makespace: remainder too big: %u", remain)); /* * Not enough space in m, split the contents * of m, inserting new mbufs as required. * * NB: this ignores mbuf types. */ MGET(n, MB_DONTWAIT, MT_DATA); if (n == NULL) return (NULL); n->m_next = m->m_next; /* splice new mbuf */ m->m_next = n; newipsecstat.ips_mbinserted++; if (hlen <= M_TRAILINGSPACE(m) + remain) { /* * New header fits in the old mbuf if we copy * the remainder; just do the copy to the new * mbuf and we're good to go. */ memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + skip, remain); n->m_len = remain; m->m_len = skip + hlen; *off = skip; } else { /* * No space in the old mbuf for the new header. * Make space in the new mbuf and check the * remainder'd data fits too. If not then we * must allocate an additional mbuf (yech). 
*/ n->m_len = 0; if (remain + hlen > M_TRAILINGSPACE(n)) { struct mbuf *n2; MGET(n2, MB_DONTWAIT, MT_DATA); /* NB: new mbuf is on chain, let caller free */ if (n2 == NULL) return (NULL); n2->m_len = 0; memcpy(mtod(n2, caddr_t), mtod(m, caddr_t) + skip, remain); n2->m_len = remain; /* splice in second mbuf */ n2->m_next = n->m_next; n->m_next = n2; newipsecstat.ips_mbinserted++; } else { memcpy(mtod(n, caddr_t) + hlen, mtod(m, caddr_t) + skip, remain); n->m_len += remain; } m->m_len -= remain; n->m_len += hlen; m = n; /* header is at front ... */ *off = 0; /* ... of new mbuf */ } } else { /* * Copy the remainder to the back of the mbuf * so there's space to write the new header. */ /* XXX can this be memcpy? does it handle overlap? */ ovbcopy(mtod(m, caddr_t) + skip, mtod(m, caddr_t) + skip + hlen, remain); m->m_len += hlen; *off = skip; } m0->m_pkthdr.len += hlen; /* adjust packet length */ return m; }
/**
 * PE_init_platform
 *
 * Initialize the platform expert for ARM.
 *
 * Called twice: once very early (vm_initialized == FALSE) to capture boot
 * arguments, video parameters and the device tree pointer, and again after
 * the VM is up to bring up SoC support, console output and interrupts.
 */
void
PE_init_platform(boolean_t vm_initialized, void * _args)
{
	boot_args *args = (boot_args *)_args;

	if (PE_state.initialized == FALSE) {
		PE_early_puts("PE_init_platform: My name is Macintosh.\n");
		PE_early_puts("PE_init_platform: Initializing for the first time.\n");

		/* Capture boot arguments and framebuffer geometry. */
		PE_state.initialized = TRUE;
		PE_state.bootArgs = _args;
		PE_state.deviceTreeHead = args->deviceTreeP;
		PE_state.video.v_baseAddr = args->Video.v_baseAddr;
		PE_state.video.v_rowBytes = args->Video.v_rowBytes;
		PE_state.video.v_width = args->Video.v_width;
		PE_state.video.v_height = args->Video.v_height;
		PE_state.video.v_depth = args->Video.v_depth;
		PE_state.video.v_display = args->Video.v_display;
		strcpy(PE_state.video.v_pixelFormat, "PPPPPPPP");
	}

	if (!vm_initialized) {
		/* Initialize the device tree crap. */
		PE_early_puts("PE_init_platform: Initializing device tree\n");
		DTInit(PE_state.deviceTreeHead);

		PE_early_puts("PE_init_platform: Calling pe_identify_machine\n");
		pe_identify_machine(NULL);
	} else {
		DTEntry entry;
		/*
		 * BUG FIX: 'map' was previously declared as a plain char
		 * ("char* fversion, map;"), so the (void **)&map below let
		 * DTGetProperty store a full pointer into a one-byte stack
		 * slot, corrupting adjacent stack memory.  Both must be
		 * pointers.
		 */
		char *fversion, *map;
		unsigned int size;

		pe_initialized = 1;

		kprintf("PE_init_platform: It sure is great to get out of that bag.\n");
		PE_init_SocSupport();

		/* Reset kputc. */
		PE_kputc = gPESocDispatch.uart_putc;

		/* XXX: Real iOS kernel does iBoot/debug-enabled init after the DTInit call. */
		if( kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
			/* What's the iBoot version on this bad boy? */
			if( kSuccess == DTGetProperty(entry, "firmware-version", (void **) &fversion, &size)) {
				if(fversion && (strlen(fversion) <= 32)) {
					/*
					 * NOTE(review): copies strlen bytes
					 * without a terminating NUL — assumes
					 * firmware_version is pre-zeroed and
					 * at least 33 bytes; confirm.
					 */
					ovbcopy((void*)fversion, (void*)firmware_version, strlen(fversion));
				}
			}
			/* Is the SoC debug-enabled? */
			if( kSuccess == DTGetProperty(entry, "debug-enabled", (void **) &map, &size)) {
				debug_enabled = 1;
			}
		}

		pe_arm_init_interrupts(NULL);
	}
}
/*
 * Given a directory, return the full, resolved path.  This looks up "..",
 * searches for the given vnode in the parent, appends the component, etc.  It
 * is used to implement vnodetopath() and getcwd() when the cached path fails.
 *
 * The path is built right-to-left in 'buf' (bufloc walks backwards from the
 * end) and slid to the front of the buffer on success.
 */
static int
dirtopath(vnode_t *vrootp, vnode_t *vp, char *buf, size_t buflen, int flags,
    cred_t *cr)
{
	pathname_t pn, rpn, emptypn;
	vnode_t *cmpvp, *pvp = NULL;
	vnode_t *startvp = vp;
	int err = 0, vprivs;
	size_t complen;
	char *dbuf;
	dirent64_t *dp;
	char *bufloc;
	size_t dlen = DIRENT64_RECLEN(MAXPATHLEN);
	refstr_t *mntpt;

	/* Operation only allowed on directories */
	ASSERT(vp->v_type == VDIR);

	/* We must have at least enough space for "/" */
	if (buflen < 2)
		return (ENAMETOOLONG);

	/* Start at end of string with terminating null */
	bufloc = &buf[buflen - 1];
	*bufloc = '\0';

	pn_alloc(&pn);
	pn_alloc(&rpn);
	dbuf = kmem_alloc(dlen, KM_SLEEP);
	bzero(&emptypn, sizeof (emptypn));

	/*
	 * Begin with an additional reference on vp.  This will be decremented
	 * during the loop.
	 */
	VN_HOLD(vp);

	for (;;) {
		/*
		 * Return if we've reached the root.  If the buffer is empty,
		 * return '/'.  We explicitly don't use vn_compare(), since it
		 * compares the real vnodes.  A lofs mount of '/' would produce
		 * incorrect results otherwise.
		 */
		if (VN_CMP(vrootp, vp)) {
			if (*bufloc == '\0')
				*--bufloc = '/';
			break;
		}

		/*
		 * If we've reached the VFS root, something has gone wrong.  We
		 * should have reached the root in the above check.  The only
		 * explantation is that 'vp' is not contained withing the given
		 * root, in which case we return EPERM.
		 */
		if (VN_CMP(rootdir, vp)) {
			err = EPERM;
			goto out;
		}

		/*
		 * Shortcut: see if this vnode is a mountpoint.  If so,
		 * grab the path information from the vfs_t.
		 */
		if (vp->v_flag & VROOT) {

			mntpt = vfs_getmntpoint(vp->v_vfsp);
			if ((err = pn_set(&pn, (char *)refstr_value(mntpt)))
			    == 0) {
				refstr_rele(mntpt);
				rpn.pn_path = rpn.pn_buf;

				/*
				 * Ensure the mountpoint still exists.
				 * (Two holds: lookuppnvp consumes both its
				 * rootvp and startvp references.)
				 */
				VN_HOLD(vrootp);
				if (vrootp != rootdir)
					VN_HOLD(vrootp);
				if (lookuppnvp(&pn, &rpn, flags, NULL,
				    &cmpvp, vrootp, vrootp, cr) == 0) {

					if (VN_CMP(vp, cmpvp)) {
						VN_RELE(cmpvp);

						complen = strlen(rpn.pn_path);
						bufloc -= complen;
						if (bufloc < buf) {
							/*
							 * NOTE(review): other
							 * overflow paths below
							 * use ENAMETOOLONG;
							 * confirm ERANGE is
							 * intended here.
							 */
							err = ERANGE;
							goto out;
						}
						bcopy(rpn.pn_path, bufloc,
						    complen);
						break;
					} else {
						VN_RELE(cmpvp);
					}
				}
			} else {
				refstr_rele(mntpt);
			}
		}

		/*
		 * Shortcut: see if this vnode has correct v_path.  If so,
		 * we have the work done.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_path != NULL) {

			if ((err = pn_set(&pn, vp->v_path)) == 0) {
				mutex_exit(&vp->v_lock);
				rpn.pn_path = rpn.pn_buf;

				/*
				 * Ensure the v_path pointing to correct vnode
				 */
				VN_HOLD(vrootp);
				if (vrootp != rootdir)
					VN_HOLD(vrootp);
				if (lookuppnvp(&pn, &rpn, flags, NULL,
				    &cmpvp, vrootp, vrootp, cr) == 0) {

					if (VN_CMP(vp, cmpvp)) {
						VN_RELE(cmpvp);

						complen = strlen(rpn.pn_path);
						bufloc -= complen;
						if (bufloc < buf) {
							/* See note above. */
							err = ERANGE;
							goto out;
						}
						bcopy(rpn.pn_path, bufloc,
						    complen);
						break;
					} else {
						VN_RELE(cmpvp);
					}
				}
			} else {
				mutex_exit(&vp->v_lock);
			}
		} else {
			mutex_exit(&vp->v_lock);
		}

		/*
		 * Shortcuts failed, search for this vnode in its parent.  If
		 * this is a mountpoint, then get the vnode underneath.
		 */
		if (vp->v_flag & VROOT)
			vp = vn_under(vp);
		if ((err = VOP_LOOKUP(vp, "..", &pvp, &emptypn, 0, vrootp,
		    cr, NULL, NULL, NULL)) != 0)
			goto out;

		/*
		 * With extended attributes, it's possible for a directory to
		 * have a parent that is a regular file.  Check for that here.
		 */
		if (pvp->v_type != VDIR) {
			err = ENOTDIR;
			goto out;
		}

		/*
		 * If this is true, something strange has happened.  This is
		 * only true if we are the root of a filesystem, which should
		 * have been caught by the check above.
		 */
		if (VN_CMP(pvp, vp)) {
			err = ENOENT;
			goto out;
		}

		/*
		 * Check if we have read and search privilege so, that
		 * we can lookup the path in the directory
		 */
		vprivs = (flags & LOOKUP_CHECKREAD) ? VREAD | VEXEC : VEXEC;
		if ((err = VOP_ACCESS(pvp, vprivs, 0, cr, NULL)) != 0) {
			goto out;
		}

		/*
		 * Try to obtain the path component from dnlc cache
		 * before searching through the directory.
		 */
		if ((cmpvp = dnlc_reverse_lookup(vp, dbuf, dlen)) != NULL) {
			/*
			 * If we got parent vnode as a result,
			 * then the answered path is correct.
			 */
			if (VN_CMP(cmpvp, pvp)) {
				VN_RELE(cmpvp);
				complen = strlen(dbuf);
				bufloc -= complen;
				if (bufloc <= buf) {
					err = ENAMETOOLONG;
					goto out;
				}
				bcopy(dbuf, bufloc, complen);

				/* Prepend a slash to the current path */
				*--bufloc = '/';

				/* And continue with the next component */
				VN_RELE(vp);
				vp = pvp;
				pvp = NULL;
				continue;
			} else {
				VN_RELE(cmpvp);
			}
		}

		/*
		 * Search the parent directory for the entry corresponding to
		 * this vnode.
		 */
		if ((err = dirfindvp(vrootp, pvp, vp, cr, dbuf, dlen, &dp))
		    != 0)
			goto out;
		complen = strlen(dp->d_name);
		bufloc -= complen;
		if (bufloc <= buf) {
			err = ENAMETOOLONG;
			goto out;
		}
		bcopy(dp->d_name, bufloc, complen);

		/* Prepend a slash to the current path.  */
		*--bufloc = '/';

		/* And continue with the next component */
		VN_RELE(vp);
		vp = pvp;
		pvp = NULL;
	}

	/*
	 * Place the path at the beginning of the buffer.
	 * (Path was built at the tail of the buffer; regions overlap,
	 * hence ovbcopy.)
	 */
	if (bufloc != buf)
		ovbcopy(bufloc, buf, buflen - (bufloc - buf));

out:
	/*
	 * If the error was ESTALE and the current directory to look in
	 * was the root for this lookup, the root for a mounted file
	 * system, or the starting directory for lookups, then
	 * return ENOENT instead of ESTALE.  In this case, no recovery
	 * is possible by the higher level.  If ESTALE was returned for
	 * some intermediate directory along the path, then recovery
	 * is potentially possible and retrying from the higher level
	 * will either correct the situation by purging stale cache
	 * entries or eventually get back to the point where no recovery
	 * is possible.
	 */
	if (err == ESTALE &&
	    (VN_CMP(vp, vrootp) || (vp->v_flag & VROOT) || vp == startvp))
		err = ENOENT;

	kmem_free(dbuf, dlen);
	VN_RELE(vp);
	if (pvp)
		VN_RELE(pvp);
	pn_free(&pn);
	pn_free(&rpn);

	return (err);
}
/* * Massage IPv4/IPv6 headers for AH processing. */ int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int ad, alloc, nxt; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("ah_massage_headers(): m_pullup() failed\n")); ahstat.ahs_hdrops++; return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; /* * On input, fix ip_len which has been byte-swapped * at ip_input(). */ if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off &= htons(IP_DF); else ip->ip_off = 0; ptr = mtod(m, unsigned char *) + sizeof(struct ip); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("ah_massage_headers(): illegal IPv4 " "option length for option %d\n", ptr[off])); ahstat.ahs_hdrops++; m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("ah_massage_headers(): " "illegal IPv4 option length for " "option %d\n", ptr[off])); ahstat.ahs_hdrops++; m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. 
*/ if (ptr[off + 1] < 2) { DPRINTF(("ah_massage_headers(): " "illegal IPv4 option length for " "option %d\n", ptr[off])); ahstat.ahs_hdrops++; m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. */ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* FALLTHROUGH */ default: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("ah_massage_headers(): " "illegal IPv4 option length for " "option %d\n", ptr[off])); ahstat.ahs_hdrops++; m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; bcopy(ipseczeroes, ptr, count); off += count; break; } /* Sanity check. */ if (off > skip) { DPRINTF(("ah_massage_headers(): malformed " "IPv4 options header\n")); ahstat.ahs_hdrops++; m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("ah_massage_headers(): unsupported IPv6 " "jumbogram")); ahstat.ahs_hdrops++; m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_EMBED(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_EMBED(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. */ m_copyback(m, 0, sizeof(struct ip6_hdr), &ip6); /* Let's deal with the remaining headers (if any). 
*/ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = malloc(skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("ah_massage_headers(): failed to allocate memory for IPv6 headers\n")); ahstat.ahs_hdrops++; m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; nxt = ip6.ip6_nxt & 0xff; /* Next header type. */ for (off = 0; off < skip - sizeof(struct ip6_hdr);) { switch (nxt) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *) (ptr + off); /* * Process the mutable/immutable * options -- borrows heavily from the * KAME code. */ for (count = off + sizeof(struct ip6_ext); count < off + ((ip6e->ip6e_len + 1) << 3);) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } /* Sanity check. */ if (count > off + ((ip6e->ip6e_len + 1) << 3)) { ahstat.ahs_hdrops++; m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } ad = ptr[count + 1]; /* If mutable option, zeroize. */ if (ptr[count] & IP6OPT_MUTABLE) bcopy(ipseczeroes, ptr + count, ptr[count + 1]); count += ad; /* Sanity check. */ if (count > skip - sizeof(struct ip6_hdr)) { ahstat.ahs_hdrops++; m_freem(m); /* Free, if we allocated. */ if (alloc) free(ptr, M_XDATA); return EINVAL; } } /* Advance. */ off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ { struct ip6_rthdr *rh; ip6e = (struct ip6_ext *) (ptr + off); rh = (struct ip6_rthdr *)(ptr + off); /* * must adjust content to make it look like * its final form (as seen at the final * destination). * we only know how to massage type 0 routing * header. 
*/ if (out && rh->ip6r_type == IPV6_RTHDR_TYPE_0) { struct ip6_rthdr0 *rh0; struct in6_addr *addr, finaldst; int i; rh0 = (struct ip6_rthdr0 *)rh; addr = (struct in6_addr *)(rh0 + 1); for (i = 0; i < rh0->ip6r0_segleft; i++) if (IN6_IS_SCOPE_EMBED(&addr[i])) addr[i].s6_addr16[1] = 0; finaldst = addr[rh0->ip6r0_segleft - 1]; ovbcopy(&addr[0], &addr[1], sizeof(struct in6_addr) * (rh0->ip6r0_segleft - 1)); m_copydata(m, 0, sizeof(ip6), (caddr_t)&ip6); addr[0] = ip6.ip6_dst; ip6.ip6_dst = finaldst; m_copyback(m, 0, sizeof(ip6), &ip6); rh0->ip6r0_segleft = 0; } /* advance */ off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; } default: DPRINTF(("ah_massage_headers(): unexpected " "IPv6 header type %d\n", off)); if (alloc) free(ptr, M_XDATA); ahstat.ahs_hdrops++; m_freem(m); return EINVAL; } } /* Copyback and free, if we allocated. */ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ }
/*
 * Potentially decap ESP in UDP frame.  Check for an ESP header
 * and optional marker; if present, strip the UDP header and
 * push the result through IPSec.
 *
 * Returns mbuf to be processed (potentially re-allocated) or
 * NULL if consumed and/or processed.
 */
static struct mbuf *
udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
{
	size_t minlen, payload, skip, iphlen;
	caddr_t data;
	struct udpcb *up;
	struct m_tag *tag;
	struct udphdr *udphdr;
	struct ip *ip;

	INP_RLOCK_ASSERT(inp);

	/*
	 * Pull up data so the longest case is contiguous:
	 *    IP/UDP hdr + non ESP marker + ESP hdr.
	 */
	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
	if (minlen > m->m_pkthdr.len)
		minlen = m->m_pkthdr.len;
	if ((m = m_pullup(m, minlen)) == NULL) {
		IPSECSTAT_INC(ips_in_inval);
		return (NULL);		/* Bypass caller processing. */
	}
	data = mtod(m, caddr_t);	/* Points to ip header. */
	payload = m->m_len - off;	/* Size of payload. */

	/* Single 0xff byte is a NAT-T keepalive, pass through untouched. */
	if (payload == 1 && data[off] == '\xff')
		return (m);		/* NB: keepalive packet, no decap. */

	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
	    ("u_flags 0x%x", up->u_flags));

	/*
	 * Check that the payload is large enough to hold an
	 * ESP header and compute the amount of data to remove.
	 *
	 * NB: the caller has already done a pullup for us.
	 * XXX can we assume alignment and eliminate bcopys?
	 */
	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
		/*
		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
		 * possible AH mode non-IKE marker+non-ESP marker
		 * from draft-ietf-ipsec-udp-encaps-00.txt.
		 */
		uint64_t marker;

		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
			return (m);	/* NB: no decap. */
		bcopy(data + off, &marker, sizeof(uint64_t));
		if (marker != 0)	/* Non-IKE marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(uint64_t) + sizeof(struct udphdr);
	} else {
		uint32_t spi;

		if (payload <= sizeof(struct esp)) {
			IPSECSTAT_INC(ips_in_inval);
			m_freem(m);
			return (NULL);	/* Discard. */
		}
		bcopy(data + off, &spi, sizeof(uint32_t));
		if (spi == 0)		/* Non-ESP marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(struct udphdr);
	}

	/*
	 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
	 * the UDP ports. This is required if we want to select
	 * the right SPD for multiple hosts behind same NAT.
	 *
	 * NB: ports are maintained in network byte order everywhere
	 *     in the NAT-T code.
	 */
	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
	    2 * sizeof(uint16_t), M_NOWAIT);
	if (tag == NULL) {
		IPSECSTAT_INC(ips_in_nomem);
		m_freem(m);
		return (NULL);		/* Discard. */
	}
	iphlen = off - sizeof(struct udphdr);
	udphdr = (struct udphdr *)(data + iphlen);
	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
	m_tag_prepend(m, tag);

	/*
	 * Remove the UDP header (and possibly the non ESP marker)
	 * IP header length is iphlen
	 * Before:
	 *   <--- off --->
	 *   +----+------+-----+
	 *   | IP |  UDP | ESP |
	 *   +----+------+-----+
	 *        <-skip->
	 * After:
	 *          +----+-----+
	 *          | IP | ESP |
	 *          +----+-----+
	 *   <-skip->
	 *
	 * Slide the IP header forward over the UDP header (overlapping
	 * regions, hence ovbcopy), then trim the gap off the front.
	 */
	ovbcopy(data, data + skip, iphlen);
	m_adj(m, skip);

	/* Fix up IP length/protocol for the now ESP-in-IP packet. */
	ip = mtod(m, struct ip *);
	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
	ip->ip_p = IPPROTO_ESP;

	/*
	 * We cannot yet update the cksums so clear any
	 * h/w cksum flags as they are no longer valid.
	 */
	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);

	(void) ipsec4_common_input(m, iphlen, ip->ip_p);
	return (NULL);			/* NB: consumed, bypass processing. */
}
/*
 * OOTB version of the above.
 * If iserror == 0, sends an abort.  If iserror != 0, sends an error.
 *
 * Builds the reply by copying and reversing the offending packet's IP and
 * SCTP headers (replacing IPv4 options with EOL, stripping IPv6 extension
 * headers), appends the abort/error chunk, and transmits it.
 */
void
sctp_ootb_send_abort(uint32_t vtag, uint16_t serror, char *details,
    size_t len, const mblk_t *inmp, int iserror, boolean_t tbit,
    ip_recv_attr_t *ira, ip_stack_t *ipst)
{
	uint32_t	ip_hdr_len;
	size_t		ahlen;
	ipha_t		*ipha = NULL;
	ip6_t		*ip6h = NULL;
	sctp_hdr_t	*insctph;
	int		i;
	uint16_t	port;
	ssize_t		alen;
	int		isv4;
	mblk_t		*mp;
	netstack_t	*ns = ipst->ips_netstack;
	sctp_stack_t	*sctps = ns->netstack_sctp;
	ip_xmit_attr_t	ixas;

	bzero(&ixas, sizeof (ixas));

	isv4 = (IPH_HDR_VERSION(inmp->b_rptr) == IPV4_VERSION);
	ip_hdr_len = ira->ira_ip_hdr_length;
	ahlen = ip_hdr_len + sizeof (sctp_hdr_t);

	/*
	 * If this is a labeled system, then check to see if we're allowed to
	 * send a response to this particular sender.  If not, then just drop.
	 */
	if (is_system_labeled() && !tsol_can_reply_error(inmp, ira))
		return;

	mp = allocb(ahlen + sctps->sctps_wroff_xtra, BPRI_MED);
	if (mp == NULL) {
		return;
	}
	mp->b_rptr += sctps->sctps_wroff_xtra;
	mp->b_wptr = mp->b_rptr + ahlen;
	/* Start from a copy of the offending packet's IP + SCTP headers. */
	bcopy(inmp->b_rptr, mp->b_rptr, ahlen);

	/*
	 * We follow the logic in tcp_xmit_early_reset() in that we skip
	 * reversing source route (i.e. replace all IP options with EOL).
	 */
	if (isv4) {
		ipaddr_t	v4addr;

		ipha = (ipha_t *)mp->b_rptr;

		/* Blank out any IPv4 options rather than reversing them. */
		for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
			mp->b_rptr[i] = IPOPT_EOL;
		/* Swap addresses */
		ipha->ipha_length = htons(ahlen);
		v4addr = ipha->ipha_src;
		ipha->ipha_src = ipha->ipha_dst;
		ipha->ipha_dst = v4addr;
		ipha->ipha_ident = 0;
		ipha->ipha_ttl = (uchar_t)sctps->sctps_ipv4_ttl;
		ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
	} else {
		in6_addr_t	v6addr;

		ip6h = (ip6_t *)mp->b_rptr;

		/* Remove any extension headers assuming partial overlay */
		if (ip_hdr_len > IPV6_HDR_LEN) {
			uint8_t	*to;

			/*
			 * Slide the base IPv6 header forward so it abuts the
			 * SCTP header; regions overlap, hence ovbcopy.
			 */
			to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN;
			ovbcopy(ip6h, to, IPV6_HDR_LEN);
			mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN;
			ip_hdr_len = IPV6_HDR_LEN;
			ip6h = (ip6_t *)mp->b_rptr;
			ip6h->ip6_nxt = IPPROTO_SCTP;
			ahlen = ip_hdr_len + sizeof (sctp_hdr_t);
		}
		ip6h->ip6_plen = htons(ahlen - IPV6_HDR_LEN);
		v6addr = ip6h->ip6_src;
		ip6h->ip6_src = ip6h->ip6_dst;
		ip6h->ip6_dst = v6addr;
		ip6h->ip6_hops = (uchar_t)sctps->sctps_ipv6_hoplimit;
		ixas.ixa_flags = IXAF_BASIC_SIMPLE_V6;
		if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_dst)) {
			ixas.ixa_flags |= IXAF_SCOPEID_SET;
			ixas.ixa_scopeid = ira->ira_ruifindex;
		}
	}
	insctph = (sctp_hdr_t *)(mp->b_rptr + ip_hdr_len);

	/* Swap ports.  Verification tag is reused. */
	port = insctph->sh_sport;
	insctph->sh_sport = insctph->sh_dport;
	insctph->sh_dport = port;
	insctph->sh_verf = vtag;

	/* Link in the abort chunk */
	if ((alen = sctp_link_abort(mp, serror, details, len, iserror, tbit))
	    < 0) {
		freemsg(mp);
		return;
	}

	ixas.ixa_pktlen = ahlen + alen;
	ixas.ixa_ip_hdr_length = ip_hdr_len;

	/* Patch the final length now that the chunk has been appended. */
	if (isv4) {
		ipha->ipha_length = htons(ixas.ixa_pktlen);
	} else {
		ip6h->ip6_plen = htons(ixas.ixa_pktlen - IPV6_HDR_LEN);
	}

	ixas.ixa_protocol = IPPROTO_SCTP;
	ixas.ixa_zoneid = ira->ira_zoneid;
	ixas.ixa_ipst = ipst;
	ixas.ixa_ifindex = 0;

	SCTPS_BUMP_MIB(sctps, sctpAborted);

	if (is_system_labeled()) {
		ASSERT(ira->ira_tsl != NULL);

		ixas.ixa_tsl = ira->ira_tsl;	/* A multi-level responder */
	}

	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		/*
		 * Apply IPsec based on how IPsec was applied to
		 * the packet that was out of the blue.
		 */
		if (!ipsec_in_to_out(ira, &ixas, mp, ipha, ip6h)) {
			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
			/* Note: mp already consumed and ip_drop_packet done */
			return;
		}
	} else {
		/*
		 * This is in clear. The abort message we are building
		 * here should go out in clear, independent of our policy.
		 */
		ixas.ixa_flags |= IXAF_NO_IPSEC;
	}

	(void) ip_output_simple(mp, &ixas);
	ixa_cleanup(&ixas);
}