int metadata_write(struct hast_resource *res) { struct ebuf *eb; struct nv *nv; unsigned char *buf, *ptr; size_t size; ssize_t done; int ret; buf = calloc(1, METADATA_SIZE); if (buf == NULL) { pjdlog_error("Unable to allocate %zu bytes for metadata.", (size_t)METADATA_SIZE); return (-1); } ret = -1; nv = nv_alloc(); nv_add_string(nv, res->hr_name, "resource"); nv_add_uint64(nv, (uint64_t)res->hr_datasize, "datasize"); nv_add_uint32(nv, (uint32_t)res->hr_extentsize, "extentsize"); nv_add_uint32(nv, (uint32_t)res->hr_keepdirty, "keepdirty"); nv_add_uint64(nv, (uint64_t)res->hr_localoff, "offset"); nv_add_uint64(nv, res->hr_resuid, "resuid"); if (res->hr_role == HAST_ROLE_PRIMARY || res->hr_role == HAST_ROLE_INIT) { nv_add_uint64(nv, res->hr_primary_localcnt, "localcnt"); nv_add_uint64(nv, res->hr_primary_remotecnt, "remotecnt"); } else /* if (res->hr_role == HAST_ROLE_SECONDARY) */ { PJDLOG_ASSERT(res->hr_role == HAST_ROLE_SECONDARY); nv_add_uint64(nv, res->hr_secondary_localcnt, "localcnt"); nv_add_uint64(nv, res->hr_secondary_remotecnt, "remotecnt"); } nv_add_string(nv, role2str(res->hr_role), "prevrole"); if (nv_error(nv) != 0) { pjdlog_error("Unable to create metadata."); goto end; } res->hr_previous_role = res->hr_role; eb = nv_hton(nv); PJDLOG_ASSERT(eb != NULL); ptr = ebuf_data(eb, &size); PJDLOG_ASSERT(ptr != NULL); PJDLOG_ASSERT(size < METADATA_SIZE); bcopy(ptr, buf, size); done = pwrite(res->hr_localfd, buf, METADATA_SIZE, 0); if (done == -1 || done != METADATA_SIZE) { pjdlog_errno(LOG_ERR, "Unable to write metadata"); goto end; } ret = 0; end: free(buf); nv_free(nv); return (ret); }
static void ssl_log_errors(void) { unsigned long error; while ((error = ERR_get_error()) != 0) pjdlog_error("SSL error: %s", ERR_error_string(error, NULL)); }
int provinfo(struct hast_resource *res, bool dowrite) { struct stat sb; PJDLOG_ASSERT(res->hr_localpath != NULL && res->hr_localpath[0] != '\0'); if (res->hr_localfd == -1) { res->hr_localfd = open(res->hr_localpath, dowrite ? O_RDWR : O_RDONLY); if (res->hr_localfd == -1) { pjdlog_errno(LOG_ERR, "Unable to open %s", res->hr_localpath); return (-1); } } if (fstat(res->hr_localfd, &sb) == -1) { pjdlog_errno(LOG_ERR, "Unable to stat %s", res->hr_localpath); return (-1); } if (S_ISCHR(sb.st_mode)) { /* * If this is character device, it is most likely GEOM provider. */ if (ioctl(res->hr_localfd, DIOCGMEDIASIZE, &res->hr_local_mediasize) == -1) { pjdlog_errno(LOG_ERR, "Unable obtain provider %s mediasize", res->hr_localpath); return (-1); } if (ioctl(res->hr_localfd, DIOCGSECTORSIZE, &res->hr_local_sectorsize) == -1) { pjdlog_errno(LOG_ERR, "Unable obtain provider %s sectorsize", res->hr_localpath); return (-1); } } else if (S_ISREG(sb.st_mode)) { /* * We also support regular files for which we hardcode * sector size of 512 bytes. */ res->hr_local_mediasize = sb.st_size; res->hr_local_sectorsize = 512; } else { /* * We support no other file types. */ pjdlog_error("%s is neither GEOM provider nor regular file.", res->hr_localpath); errno = EFTYPE; return (-1); } return (0); }
struct service_connection * service_connection_add(struct service *service, int sock, const nvlist_t *limits) { struct service_connection *sconn; int serrno; PJDLOG_ASSERT(service->s_magic == SERVICE_MAGIC); sconn = malloc(sizeof(*sconn)); if (sconn == NULL) { pjdlog_error("Unable to allocate memory for service connection."); return (NULL); } sconn->sc_chan = cap_wrap(sock); if (sconn->sc_chan == NULL) { serrno = errno; pjdlog_error("Unable to wrap communication channel."); free(sconn); errno = serrno; return (NULL); } if (limits == NULL) { sconn->sc_limits = NULL; } else { sconn->sc_limits = nvlist_clone(limits); if (sconn->sc_limits == NULL) { serrno = errno; pjdlog_error("Unable to clone limits."); (void)cap_unwrap(sconn->sc_chan); free(sconn); errno = serrno; return (NULL); } } sconn->sc_magic = SERVICE_CONNECTION_MAGIC; TAILQ_INSERT_TAIL(&service->s_connections, sconn, sc_next); return (sconn); }
static int requnpack(struct hast_resource *res, struct hio *hio, struct nv *nv) { hio->hio_cmd = nv_get_uint8(nv, "cmd"); if (hio->hio_cmd == 0) { pjdlog_error("Header contains no 'cmd' field."); hio->hio_error = EINVAL; goto end; } if (hio->hio_cmd != HIO_KEEPALIVE) { hio->hio_seq = nv_get_uint64(nv, "seq"); if (hio->hio_seq == 0) { pjdlog_error("Header contains no 'seq' field."); hio->hio_error = EINVAL; goto end; } } switch (hio->hio_cmd) { case HIO_FLUSH: case HIO_KEEPALIVE: break; case HIO_READ: case HIO_WRITE: case HIO_DELETE: hio->hio_offset = nv_get_uint64(nv, "offset"); if (nv_error(nv) != 0) { pjdlog_error("Header is missing 'offset' field."); hio->hio_error = EINVAL; goto end; } hio->hio_length = nv_get_uint64(nv, "length"); if (nv_error(nv) != 0) { pjdlog_error("Header is missing 'length' field."); hio->hio_error = EINVAL; goto end; } if (hio->hio_length == 0) { pjdlog_error("Data length is zero."); hio->hio_error = EINVAL; goto end; } if (hio->hio_length > MAXPHYS) { pjdlog_error("Data length is too large (%ju > %ju).", (uintmax_t)hio->hio_length, (uintmax_t)MAXPHYS); hio->hio_error = EINVAL; goto end; } if ((hio->hio_offset % res->hr_local_sectorsize) != 0) { pjdlog_error("Offset %ju is not multiple of sector size.", (uintmax_t)hio->hio_offset); hio->hio_error = EINVAL; goto end; } if ((hio->hio_length % res->hr_local_sectorsize) != 0) { pjdlog_error("Length %ju is not multiple of sector size.", (uintmax_t)hio->hio_length); hio->hio_error = EINVAL; goto end; } if (hio->hio_offset + hio->hio_length > (uint64_t)res->hr_datasize) { pjdlog_error("Data offset is too large (%ju > %ju).", (uintmax_t)(hio->hio_offset + hio->hio_length), (uintmax_t)res->hr_datasize); hio->hio_error = EINVAL; goto end; } break; default: pjdlog_error("Header contains invalid 'cmd' (%hhu).", hio->hio_cmd); hio->hio_error = EINVAL; goto end; } hio->hio_error = 0; end: return (hio->hio_error); }
/*
 * Secondary-side remote handshake: compare our (secondary) metadata
 * counters against the primary's, decide the synchronization source
 * (or detect split-brain / resuid mismatch), and send our activemap
 * plus the decision back to the primary.  Exits the process on fatal
 * conditions (resuid mismatch, split-brain, I/O errors).
 */
static void
init_remote(struct hast_resource *res, struct nv *nvin)
{
	uint64_t resuid;
	struct nv *nvout;
	unsigned char *map;
	size_t mapsize;

#ifdef notyet
	/* Setup direction. */
	if (proto_send(res->hr_remoteout, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
#endif

	map = NULL;
	mapsize = 0;
	nvout = nv_alloc();
	nv_add_int64(nvout, (int64_t)res->hr_datasize, "datasize");
	nv_add_int32(nvout, (int32_t)res->hr_extentsize, "extentsize");
	resuid = nv_get_uint64(nvin, "resuid");
	res->hr_primary_localcnt = nv_get_uint64(nvin, "localcnt");
	res->hr_primary_remotecnt = nv_get_uint64(nvin, "remotecnt");
	nv_add_uint64(nvout, res->hr_secondary_localcnt, "localcnt");
	nv_add_uint64(nvout, res->hr_secondary_remotecnt, "remotecnt");
	mapsize = activemap_calc_ondisk_size(res->hr_local_mediasize -
	    METADATA_SIZE, res->hr_extentsize, res->hr_local_sectorsize);
	map = malloc(mapsize);
	if (map == NULL) {
		pjdlog_exitx(EX_TEMPFAIL,
		    "Unable to allocate memory (%zu bytes) for activemap.",
		    mapsize);
	}
	/*
	 * When we work as primary and secondary is missing we will increase
	 * localcnt in our metadata. When secondary is connected and synced
	 * we make localcnt be equal to remotecnt, which means nodes are more
	 * or less in sync.
	 * Split-brain condition is when both nodes are not able to communicate
	 * and are both configured as primary nodes. In turn, they can both
	 * make incompatible changes to the data and we have to detect that.
	 * Under split-brain condition we will increase our localcnt on first
	 * write and remote node will increase its localcnt on first write.
	 * When we connect we can see that primary's localcnt is greater than
	 * our remotecnt (primary was modified while we weren't watching) and
	 * our localcnt is greater than primary's remotecnt (we were modified
	 * while primary wasn't watching).
	 * There are many possible combinations which are all gathered below.
	 * Don't pay too much attention to exact numbers, the more important
	 * is to compare them. We compare secondary's local with primary's
	 * remote and secondary's remote with primary's local.
	 * Note that every case where primary's localcnt is smaller than
	 * secondary's remotecnt and where secondary's localcnt is smaller than
	 * primary's remotecnt should be impossible in practise. We will perform
	 * full synchronization then. Those cases are marked with an asterisk.
	 * Regular synchronization means that only extents marked as dirty are
	 * synchronized (regular synchronization).
	 *
	 * SECONDARY METADATA PRIMARY METADATA
	 * local=3 remote=3   local=2 remote=2*  ?! Full sync from secondary.
	 * local=3 remote=3   local=2 remote=3*  ?! Full sync from primary.
	 * local=3 remote=3   local=2 remote=4*  ?! Full sync from primary.
	 * local=3 remote=3   local=3 remote=2   Primary is out-of-date,
	 *                                       regular sync from secondary.
	 * local=3 remote=3   local=3 remote=3   Regular sync just in case.
	 * local=3 remote=3   local=3 remote=4*  ?! Full sync from primary.
	 * local=3 remote=3   local=4 remote=2   Split-brain condition.
	 * local=3 remote=3   local=4 remote=3   Secondary out-of-date,
	 *                                       regular sync from primary.
	 * local=3 remote=3   local=4 remote=4*  ?! Full sync from primary.
	 */
	if (res->hr_resuid == 0) {
		/*
		 * Provider is used for the first time. If primary node done no
		 * writes yet as well (we will find "virgin" argument) then
		 * there is no need to synchronize anything. If primary node
		 * done any writes already we have to synchronize everything.
		 */
		PJDLOG_ASSERT(res->hr_secondary_localcnt == 0);
		res->hr_resuid = resuid;
		if (metadata_write(res) < 0)
			exit(EX_NOINPUT);
		if (nv_exists(nvin, "virgin")) {
			free(map);
			map = NULL;
			mapsize = 0;
		} else {
			/* Primary already wrote: mark every extent dirty. */
			memset(map, 0xff, mapsize);
		}
		nv_add_int8(nvout, 1, "virgin");
		nv_add_uint8(nvout, HAST_SYNCSRC_PRIMARY, "syncsrc");
	} else if (res->hr_resuid != resuid) {
		/* Different resource unique IDs: refuse to talk and exit. */
		char errmsg[256];

		free(map);
		(void)snprintf(errmsg, sizeof(errmsg),
		    "Resource unique ID mismatch (primary=%ju, secondary=%ju).",
		    (uintmax_t)resuid, (uintmax_t)res->hr_resuid);
		pjdlog_error("%s", errmsg);
		nv_add_string(nvout, errmsg, "errmsg");
		if (hast_proto_send(res, res->hr_remotein, nvout, NULL, 0) < 0) {
			pjdlog_exit(EX_TEMPFAIL, "Unable to send response to %s",
			    res->hr_remoteaddr);
		}
		nv_free(nvout);
		exit(EX_CONFIG);
	} else if (
	    /* Is primary out-of-date? */
	    (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
	     res->hr_secondary_remotecnt == res->hr_primary_localcnt) ||
	    /* Are the nodes more or less in sync? */
	    (res->hr_secondary_localcnt == res->hr_primary_remotecnt &&
	     res->hr_secondary_remotecnt == res->hr_primary_localcnt) ||
	    /* Is secondary out-of-date? */
	    (res->hr_secondary_localcnt == res->hr_primary_remotecnt &&
	     res->hr_secondary_remotecnt < res->hr_primary_localcnt)) {
		/*
		 * Nodes are more or less in sync or one of the nodes is
		 * out-of-date.
		 * It doesn't matter at this point which one, we just have to
		 * send out local bitmap to the remote node.
		 */
		/*
		 * NOTE(review): pjdlog_exit() is called with LOG_ERR here
		 * while every other fatal path uses an EX_* exit code —
		 * looks like this should be an EX_* constant; confirm.
		 */
		if (pread(res->hr_localfd, map, mapsize, METADATA_SIZE) !=
		    (ssize_t)mapsize) {
			pjdlog_exit(LOG_ERR, "Unable to read activemap");
		}
		if (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
		    res->hr_secondary_remotecnt == res->hr_primary_localcnt) {
			/* Primary is out-of-date, sync from secondary. */
			nv_add_uint8(nvout, HAST_SYNCSRC_SECONDARY, "syncsrc");
		} else {
			/*
			 * Secondary is out-of-date or counts match.
			 * Sync from primary.
			 */
			nv_add_uint8(nvout, HAST_SYNCSRC_PRIMARY, "syncsrc");
		}
	} else if (res->hr_secondary_localcnt > res->hr_primary_remotecnt &&
	    res->hr_primary_localcnt > res->hr_secondary_remotecnt) {
		/*
		 * Not good, we have split-brain condition.
		 */
		free(map);
		pjdlog_error("Split-brain detected, exiting.");
		nv_add_string(nvout, "Split-brain condition!", "errmsg");
		if (hast_proto_send(res, res->hr_remotein, nvout, NULL, 0) < 0) {
			pjdlog_exit(EX_TEMPFAIL, "Unable to send response to %s",
			    res->hr_remoteaddr);
		}
		nv_free(nvout);
		/* Exit on split-brain. */
		event_send(res, EVENT_SPLITBRAIN);
		exit(EX_CONFIG);
	} else /* if (res->hr_secondary_localcnt < res->hr_primary_remotecnt ||
	    res->hr_primary_localcnt < res->hr_secondary_remotecnt) */ {
		/*
		 * This should never happen in practise, but we will perform
		 * full synchronization.
		 */
		PJDLOG_ASSERT(res->hr_secondary_localcnt <
		    res->hr_primary_remotecnt ||
		    res->hr_primary_localcnt < res->hr_secondary_remotecnt);
		/*
		 * NOTE(review): mapsize is recomputed here although it was
		 * already computed (with identical arguments) above —
		 * harmless but redundant.
		 */
		mapsize = activemap_calc_ondisk_size(res->hr_local_mediasize -
		    METADATA_SIZE, res->hr_extentsize,
		    res->hr_local_sectorsize);
		memset(map, 0xff, mapsize);
		if (res->hr_secondary_localcnt > res->hr_primary_remotecnt) {
			/* In this one of five cases sync from secondary. */
			nv_add_uint8(nvout, HAST_SYNCSRC_SECONDARY, "syncsrc");
		} else {
			/* For the rest four cases sync from primary. */
			nv_add_uint8(nvout, HAST_SYNCSRC_PRIMARY, "syncsrc");
		}
		pjdlog_warning("This should never happen, asking for full synchronization (primary(local=%ju, remote=%ju), secondary(local=%ju, remote=%ju)).",
		    (uintmax_t)res->hr_primary_localcnt,
		    (uintmax_t)res->hr_primary_remotecnt,
		    (uintmax_t)res->hr_secondary_localcnt,
		    (uintmax_t)res->hr_secondary_remotecnt);
	}
	nv_add_uint32(nvout, (uint32_t)mapsize, "mapsize");
	if (hast_proto_send(res, res->hr_remotein, nvout, map, mapsize) < 0) {
		pjdlog_exit(EX_TEMPFAIL, "Unable to send activemap to %s",
		    res->hr_remoteaddr);
	}
	if (map != NULL)
		free(map);
	nv_free(nvout);
#ifdef notyet
	/* Setup direction. */
	if (proto_recv(res->hr_remotein, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
#endif
}
/*
 * Drop privileges: confine the process inside a jail (if available,
 * hostname built from fmt/...) or a chroot to the user's home
 * directory, switch to the given user's uid/gid/groups, and optionally
 * enter Capsicum capability mode.  Afterwards every transition is
 * re-verified (PJDLOG_VERIFY aborts on mismatch).
 *
 * Returns 0 on success, -1 on error (errno is set on some paths).
 */
int
sandbox(const char *user, bool capsicum, const char *fmt, ...)
{
#ifdef HAVE_JAIL
	struct jail jailst;
	char *jailhost;
	va_list ap;
#endif
	struct passwd *pw;
	uid_t ruid, euid;
	gid_t rgid, egid;
#ifdef HAVE_GETRESUID
	uid_t suid;
#endif
#ifdef HAVE_GETRESGID
	gid_t sgid;
#endif
	gid_t *groups, *ggroups;
	bool jailed;
	int ngroups, ret;

	PJDLOG_ASSERT(user != NULL);
	PJDLOG_ASSERT(fmt != NULL);

	ret = -1;
	groups = NULL;
	ggroups = NULL;

	/*
	 * According to getpwnam(3) we have to clear errno before calling the
	 * function to be able to distinguish between an error and missing
	 * entry (with is not treated as error by getpwnam(3)).
	 */
	errno = 0;
	pw = getpwnam(user);
	if (pw == NULL) {
		if (errno != 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to find info about '%s' user", user);
			goto out;
		} else {
			pjdlog_error("'%s' user doesn't exist.", user);
			errno = ENOENT;
			goto out;
		}
	}

	/* Determine how many supplementary groups the user may have. */
	ngroups = sysconf(_SC_NGROUPS_MAX);
	if (ngroups == -1) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to obtain maximum number of groups");
		ngroups = NGROUPS_MAX;
	}
	ngroups++;	/* For base gid. */
	groups = malloc(sizeof(groups[0]) * ngroups);
	if (groups == NULL) {
		pjdlog_error("Unable to allocate memory for %d groups.",
		    ngroups);
		goto out;
	}
	/* On return ngroups holds the actual number of groups found. */
	if (getgrouplist(user, pw->pw_gid, groups, &ngroups) == -1) {
		pjdlog_error("Unable to obtain groups of user %s.", user);
		goto out;
	}

#ifdef HAVE_JAIL
	va_start(ap, fmt);
	/*
	 * NOTE(review): on vasprintf(3) failure the pointer's value is
	 * unspecified on some platforms; this code assumes it is NULL —
	 * confirm for the supported targets.
	 */
	(void)vasprintf(&jailhost, fmt, ap);
	va_end(ap);
	if (jailhost == NULL) {
		pjdlog_error("Unable to allocate memory for jail host name.");
		goto out;
	}
	bzero(&jailst, sizeof(jailst));
	jailst.version = JAIL_API_VERSION;
	jailst.path = pw->pw_dir;
	jailst.hostname = jailhost;
	if (jail(&jailst) >= 0) {
		jailed = true;
	} else {
		/* Fall back to chroot below. */
		jailed = false;
		pjdlog_errno(LOG_WARNING,
		    "Unable to jail to directory %s", pw->pw_dir);
	}
	free(jailhost);
#else	/* !HAVE_JAIL */
	jailed = false;
#endif	/* !HAVE_JAIL */

	if (!jailed) {
		if (chroot(pw->pw_dir) == -1) {
			pjdlog_errno(LOG_ERR,
			    "Unable to change root directory to %s",
			    pw->pw_dir);
			goto out;
		}
	}
	PJDLOG_VERIFY(chdir("/") == 0);

	/* Set groups before dropping uid, while we are still privileged. */
	if (setgroups(ngroups, groups) == -1) {
		pjdlog_errno(LOG_ERR, "Unable to set groups");
		goto out;
	}
	if (setgid(pw->pw_gid) == -1) {
		pjdlog_errno(LOG_ERR, "Unable to set gid to %u",
		    (unsigned int)pw->pw_gid);
		goto out;
	}
	if (setuid(pw->pw_uid) == -1) {
		pjdlog_errno(LOG_ERR, "Unable to set uid to %u",
		    (unsigned int)pw->pw_uid);
		goto out;
	}

#ifdef HAVE_CAP_ENTER
	if (capsicum) {
		/* Best-effort: remember whether capability mode stuck. */
		capsicum = (cap_enter() == 0);
		if (!capsicum) {
			pjdlog_common(LOG_DEBUG, 1, errno,
			    "Unable to sandbox using capsicum");
		}
	}
#else	/* !HAVE_CAP_ENTER */
	capsicum = false;
#endif	/* !HAVE_CAP_ENTER */

	/*
	 * Better be sure that everything succeeded.
	 */
#ifdef HAVE_GETRESUID
	PJDLOG_VERIFY(getresuid(&ruid, &euid, &suid) == 0);
	PJDLOG_VERIFY(suid == pw->pw_uid);
#else
	ruid = getuid();
	euid = geteuid();
#endif
	PJDLOG_VERIFY(ruid == pw->pw_uid);
	PJDLOG_VERIFY(euid == pw->pw_uid);
#ifdef HAVE_GETRESGID
	PJDLOG_VERIFY(getresgid(&rgid, &egid, &sgid) == 0);
	PJDLOG_VERIFY(sgid == pw->pw_gid);
#else
	rgid = getgid();
	egid = getegid();
#endif
	PJDLOG_VERIFY(rgid == pw->pw_gid);
	PJDLOG_VERIFY(egid == pw->pw_gid);
	PJDLOG_VERIFY(getgroups(0, NULL) == ngroups);
	ggroups = malloc(sizeof(ggroups[0]) * ngroups);
	if (ggroups == NULL) {
		pjdlog_error("Unable to allocate memory for %d groups.",
		    ngroups);
		goto out;
	}
	PJDLOG_VERIFY(getgroups(ngroups, ggroups) == ngroups);
	/* Sort both lists so they can be compared order-independently. */
	qsort(groups, (size_t)ngroups, sizeof(groups[0]), groups_compare);
	qsort(ggroups, (size_t)ngroups, sizeof(ggroups[0]), groups_compare);
	PJDLOG_VERIFY(bcmp(groups, ggroups, sizeof(groups[0]) * ngroups) == 0);

	pjdlog_debug(1,
	    "Privileges successfully dropped using %s%s+setgid+setuid.",
	    capsicum ? "capsicum+" : "", jailed ? "jail" : "chroot");

	ret = 0;
out:
	if (groups != NULL)
		free(groups);
	if (ggroups != NULL)
		free(ggroups);
	return (ret);
}
int drop_privs(const struct hast_resource *res) { char jailhost[sizeof(res->hr_name) * 2]; struct jail jailst; struct passwd *pw; uid_t ruid, euid, suid; gid_t rgid, egid, sgid; gid_t gidset[1]; bool capsicum, jailed; /* * According to getpwnam(3) we have to clear errno before calling the * function to be able to distinguish between an error and missing * entry (with is not treated as error by getpwnam(3)). */ errno = 0; pw = getpwnam(HAST_USER); if (pw == NULL) { if (errno != 0) { pjdlog_errno(LOG_ERR, "Unable to find info about '%s' user", HAST_USER); return (-1); } else { pjdlog_error("'%s' user doesn't exist.", HAST_USER); errno = ENOENT; return (-1); } } bzero(&jailst, sizeof(jailst)); jailst.version = JAIL_API_VERSION; jailst.path = pw->pw_dir; if (res == NULL) { (void)snprintf(jailhost, sizeof(jailhost), "hastctl"); } else { (void)snprintf(jailhost, sizeof(jailhost), "hastd: %s (%s)", res->hr_name, role2str(res->hr_role)); } jailst.hostname = jailhost; jailst.jailname = NULL; jailst.ip4s = 0; jailst.ip4 = NULL; jailst.ip6s = 0; jailst.ip6 = NULL; if (jail(&jailst) >= 0) { jailed = true; } else { jailed = false; pjdlog_errno(LOG_WARNING, "Unable to jail to directory to %s", pw->pw_dir); if (chroot(pw->pw_dir) == -1) { pjdlog_errno(LOG_ERR, "Unable to change root directory to %s", pw->pw_dir); return (-1); } } PJDLOG_VERIFY(chdir("/") == 0); gidset[0] = pw->pw_gid; if (setgroups(1, gidset) == -1) { pjdlog_errno(LOG_ERR, "Unable to set groups to gid %u", (unsigned int)pw->pw_gid); return (-1); } if (setgid(pw->pw_gid) == -1) { pjdlog_errno(LOG_ERR, "Unable to set gid to %u", (unsigned int)pw->pw_gid); return (-1); } if (setuid(pw->pw_uid) == -1) { pjdlog_errno(LOG_ERR, "Unable to set uid to %u", (unsigned int)pw->pw_uid); return (-1); } #ifdef HAVE_CAPSICUM capsicum = (cap_enter() == 0); if (!capsicum) { pjdlog_common(LOG_DEBUG, 1, errno, "Unable to sandbox using capsicum"); } else if (res != NULL) { cap_rights_t rights; static const unsigned long geomcmds[] = 
{ DIOCGDELETE, DIOCGFLUSH }; PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY || res->hr_role == HAST_ROLE_SECONDARY); cap_rights_init(&rights, CAP_FLOCK, CAP_IOCTL, CAP_PREAD, CAP_PWRITE); if (cap_rights_limit(res->hr_localfd, &rights) == -1) { pjdlog_errno(LOG_ERR, "Unable to limit capability rights on local descriptor"); } if (cap_ioctls_limit(res->hr_localfd, geomcmds, sizeof(geomcmds) / sizeof(geomcmds[0])) == -1) { pjdlog_errno(LOG_ERR, "Unable to limit allowed GEOM ioctls"); } if (res->hr_role == HAST_ROLE_PRIMARY) { static const unsigned long ggatecmds[] = { G_GATE_CMD_MODIFY, G_GATE_CMD_START, G_GATE_CMD_DONE, G_GATE_CMD_DESTROY }; cap_rights_init(&rights, CAP_IOCTL); if (cap_rights_limit(res->hr_ggatefd, &rights) == -1) { pjdlog_errno(LOG_ERR, "Unable to limit capability rights to CAP_IOCTL on ggate descriptor"); } if (cap_ioctls_limit(res->hr_ggatefd, ggatecmds, sizeof(ggatecmds) / sizeof(ggatecmds[0])) == -1) { pjdlog_errno(LOG_ERR, "Unable to limit allowed ggate ioctls"); } } } #else capsicum = false; #endif /* * Better be sure that everything succeeded. */ PJDLOG_VERIFY(getresuid(&ruid, &euid, &suid) == 0); PJDLOG_VERIFY(ruid == pw->pw_uid); PJDLOG_VERIFY(euid == pw->pw_uid); PJDLOG_VERIFY(suid == pw->pw_uid); PJDLOG_VERIFY(getresgid(&rgid, &egid, &sgid) == 0); PJDLOG_VERIFY(rgid == pw->pw_gid); PJDLOG_VERIFY(egid == pw->pw_gid); PJDLOG_VERIFY(sgid == pw->pw_gid); PJDLOG_VERIFY(getgroups(0, NULL) == 1); PJDLOG_VERIFY(getgroups(1, gidset) == 1); PJDLOG_VERIFY(gidset[0] == pw->pw_gid); pjdlog_debug(1, "Privileges successfully dropped using %s%s+setgid+setuid.", capsicum ? "capsicum+" : "", jailed ? "jail" : "chroot"); return (0); }
int metadata_read(struct hast_resource *res, bool openrw) { unsigned char *buf; struct ebuf *eb; struct nv *nv; ssize_t done; const char *str; int rerrno; bool opened_here; opened_here = false; rerrno = 0; /* * Is this first metadata_read() call for this resource? */ if (res->hr_localfd == -1) { if (provinfo(res, openrw) == -1) { rerrno = errno; goto fail; } opened_here = true; pjdlog_debug(1, "Obtained info about %s.", res->hr_localpath); if (openrw) { if (flock(res->hr_localfd, LOCK_EX | LOCK_NB) == -1) { rerrno = errno; if (errno == EOPNOTSUPP) { pjdlog_warning("Unable to lock %s (operation not supported), but continuing.", res->hr_localpath); } else { pjdlog_errno(LOG_ERR, "Unable to lock %s", res->hr_localpath); goto fail; } } pjdlog_debug(1, "Locked %s.", res->hr_localpath); } } eb = ebuf_alloc(METADATA_SIZE); if (eb == NULL) { rerrno = errno; pjdlog_errno(LOG_ERR, "Unable to allocate memory to read metadata"); goto fail; } if (ebuf_add_tail(eb, NULL, METADATA_SIZE) == -1) { rerrno = errno; pjdlog_errno(LOG_ERR, "Unable to allocate memory to read metadata"); ebuf_free(eb); goto fail; } buf = ebuf_data(eb, NULL); PJDLOG_ASSERT(buf != NULL); done = pread(res->hr_localfd, buf, METADATA_SIZE, 0); if (done == -1 || done != METADATA_SIZE) { rerrno = errno; pjdlog_errno(LOG_ERR, "Unable to read metadata"); ebuf_free(eb); goto fail; } nv = nv_ntoh(eb); if (nv == NULL) { rerrno = errno; pjdlog_errno(LOG_ERR, "Metadata read from %s is invalid", res->hr_localpath); ebuf_free(eb); goto fail; } str = nv_get_string(nv, "resource"); if (str != NULL && strcmp(str, res->hr_name) != 0) { pjdlog_error("Provider %s is not part of resource %s.", res->hr_localpath, res->hr_name); nv_free(nv); goto fail; } res->hr_datasize = nv_get_uint64(nv, "datasize"); res->hr_extentsize = (int)nv_get_uint32(nv, "extentsize"); res->hr_keepdirty = (int)nv_get_uint32(nv, "keepdirty"); res->hr_localoff = nv_get_uint64(nv, "offset"); res->hr_resuid = nv_get_uint64(nv, "resuid"); if (res->hr_role != 
HAST_ROLE_PRIMARY) { /* Secondary or init role. */ res->hr_secondary_localcnt = nv_get_uint64(nv, "localcnt"); res->hr_secondary_remotecnt = nv_get_uint64(nv, "remotecnt"); } if (res->hr_role != HAST_ROLE_SECONDARY) { /* Primary or init role. */ res->hr_primary_localcnt = nv_get_uint64(nv, "localcnt"); res->hr_primary_remotecnt = nv_get_uint64(nv, "remotecnt"); } str = nv_get_string(nv, "prevrole"); if (str != NULL) { if (strcmp(str, "primary") == 0) res->hr_previous_role = HAST_ROLE_PRIMARY; else if (strcmp(str, "secondary") == 0) res->hr_previous_role = HAST_ROLE_SECONDARY; } if (nv_error(nv) != 0) { errno = rerrno = nv_error(nv); pjdlog_errno(LOG_ERR, "Unable to read metadata from %s", res->hr_localpath); nv_free(nv); goto fail; } nv_free(nv); return (0); fail: if (opened_here) { close(res->hr_localfd); res->hr_localfd = -1; } errno = rerrno; return (-1); }
static bool decrypt(const char *privkeyfile, const char *keyfile, const char *input, const char *output) { uint8_t buf[KERNELDUMP_BUFFER_SIZE], key[KERNELDUMP_KEY_MAX_SIZE]; EVP_CIPHER_CTX ctx; const EVP_CIPHER *cipher; FILE *fp; struct kerneldumpkey *kdk; RSA *privkey; int ifd, kfd, ofd, olen, privkeysize; ssize_t bytes; pid_t pid; PJDLOG_ASSERT(privkeyfile != NULL); PJDLOG_ASSERT(keyfile != NULL); PJDLOG_ASSERT(input != NULL); PJDLOG_ASSERT(output != NULL); privkey = NULL; /* * Decrypt a core dump in a child process so we can unlink a partially * decrypted core if the child process fails. */ pid = fork(); if (pid == -1) { pjdlog_errno(LOG_ERR, "Unable to create child process"); return (false); } if (pid > 0) return (wait_for_process(pid) == 0); kfd = open(keyfile, O_RDONLY); if (kfd == -1) { pjdlog_errno(LOG_ERR, "Unable to open %s", keyfile); goto failed; } ifd = open(input, O_RDONLY); if (ifd == -1) { pjdlog_errno(LOG_ERR, "Unable to open %s", input); goto failed; } ofd = open(output, O_WRONLY | O_CREAT | O_TRUNC, 0600); if (ofd == -1) { pjdlog_errno(LOG_ERR, "Unable to open %s", output); goto failed; } fp = fopen(privkeyfile, "r"); if (fp == NULL) { pjdlog_errno(LOG_ERR, "Unable to open %s", privkeyfile); goto failed; } if (cap_enter() < 0 && errno != ENOSYS) { pjdlog_errno(LOG_ERR, "Unable to enter capability mode"); goto failed; } privkey = RSA_new(); if (privkey == NULL) { pjdlog_error("Unable to allocate an RSA structure: %s", ERR_error_string(ERR_get_error(), NULL)); goto failed; } EVP_CIPHER_CTX_init(&ctx); kdk = read_key(kfd); close(kfd); if (kdk == NULL) goto failed; privkey = PEM_read_RSAPrivateKey(fp, &privkey, NULL, NULL); fclose(fp); if (privkey == NULL) { pjdlog_error("Unable to read data from %s.", privkeyfile); goto failed; } privkeysize = RSA_size(privkey); if (privkeysize != (int)kdk->kdk_encryptedkeysize) { pjdlog_error("RSA modulus size mismatch: equals %db and should be %ub.", 8 * privkeysize, 8 * kdk->kdk_encryptedkeysize); goto failed; } 
switch (kdk->kdk_encryption) { case KERNELDUMP_ENC_AES_256_CBC: cipher = EVP_aes_256_cbc(); break; default: pjdlog_error("Invalid encryption algorithm."); goto failed; } if (RSA_private_decrypt(kdk->kdk_encryptedkeysize, kdk->kdk_encryptedkey, key, privkey, RSA_PKCS1_PADDING) != sizeof(key)) { pjdlog_error("Unable to decrypt key: %s", ERR_error_string(ERR_get_error(), NULL)); goto failed; } RSA_free(privkey); privkey = NULL; EVP_DecryptInit_ex(&ctx, cipher, NULL, key, kdk->kdk_iv); EVP_CIPHER_CTX_set_padding(&ctx, 0); explicit_bzero(key, sizeof(key)); do { bytes = read(ifd, buf, sizeof(buf)); if (bytes < 0) { pjdlog_errno(LOG_ERR, "Unable to read data from %s", input); goto failed; } else if (bytes == 0) { break; } if (bytes > 0) { if (EVP_DecryptUpdate(&ctx, buf, &olen, buf, bytes) == 0) { pjdlog_error("Unable to decrypt core."); goto failed; } } else { if (EVP_DecryptFinal_ex(&ctx, buf, &olen) == 0) { pjdlog_error("Unable to decrypt core."); goto failed; } } if (olen == 0) continue; if (write(ofd, buf, olen) != olen) { pjdlog_errno(LOG_ERR, "Unable to write data to %s", output); goto failed; } } while (bytes > 0); explicit_bzero(buf, sizeof(buf)); EVP_CIPHER_CTX_cleanup(&ctx); exit(0); failed: explicit_bzero(key, sizeof(key)); explicit_bzero(buf, sizeof(buf)); RSA_free(privkey); EVP_CIPHER_CTX_cleanup(&ctx); exit(1); }