static int dump_one_packet_fd(int lfd, u32 id, const struct fd_parms *p)
{
	PacketSockEntry psk = PACKET_SOCK_ENTRY__INIT;
	SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;
	struct packet_sock_desc *sd;
	int i, ret;

	sd = (struct packet_sock_desc *)lookup_socket(p->stat.st_ino, PF_PACKET, 0);
	if (IS_ERR_OR_NULL(sd)) {
		pr_err("Can't find packet socket %"PRIu64"\n", p->stat.st_ino);
		return -1;
	}

	pr_info("Dumping packet socket fd %d id %#x\n", lfd, id);
	BUG_ON(sd->sd.already_dumped);
	sd->sd.already_dumped = 1;

	psk.id = sd->file_id = id;
	psk.type = sd->type;
	psk.flags = p->flags;
	psk.fown = (FownEntry *)&p->fown;
	psk.opts = &skopts;

	if (dump_socket_opts(lfd, &skopts))
		return -1;

	psk.protocol = sd->proto;
	psk.ifindex = sd->nli.pdi_index;
	psk.version = sd->nli.pdi_version;
	psk.reserve = sd->nli.pdi_reserve;
	psk.timestamp = sd->nli.pdi_tstamp;
	psk.copy_thresh = sd->nli.pdi_copy_thresh;
	psk.aux_data = (sd->nli.pdi_flags & PDI_AUXDATA ? true : false);
	psk.orig_dev = (sd->nli.pdi_flags & PDI_ORIGDEV ? true : false);
	psk.vnet_hdr = (sd->nli.pdi_flags & PDI_VNETHDR ? true : false);
	psk.loss = (sd->nli.pdi_flags & PDI_LOSS ? true : false);

	ret = dump_mreqs(&psk, sd);
	if (ret)
		goto out;

	if (sd->fanout != NO_FANOUT) {
		psk.has_fanout = true;
		psk.fanout = sd->fanout;
	}

	ret = dump_rings(&psk, sd);
	if (ret)
		goto out;

	ret = pb_write_one(img_from_set(glob_imgset, CR_FD_PACKETSK), &psk, PB_PACKET_SOCK);
out:
	release_skopts(&skopts);
	xfree(psk.rx_ring);
	xfree(psk.tx_ring);
	for (i = 0; i < psk.n_mclist; i++)
		xfree(psk.mclist[i]->addr.data);
	xfree(psk.mclist);
	return ret;
}
static int dump_ipc_data(const struct cr_imgset *imgset)
{
	int ret;

	ret = dump_ipc_var(img_from_set(imgset, CR_FD_IPC_VAR));
	if (ret < 0)
		return ret;
	ret = dump_ipc_shm(img_from_set(imgset, CR_FD_IPCNS_SHM));
	if (ret < 0)
		return ret;
	ret = dump_ipc_msg(img_from_set(imgset, CR_FD_IPCNS_MSG));
	if (ret < 0)
		return ret;
	ret = dump_ipc_sem(img_from_set(imgset, CR_FD_IPCNS_SEM));
	if (ret < 0)
		return ret;
	return 0;
}
static int dump_tunfile(int lfd, u32 id, const struct fd_parms *p)
{
	int ret;
	struct cr_img *img;
	TunfileEntry tfe = TUNFILE_ENTRY__INIT;
	struct ifreq ifr;

	if (!(root_ns_mask & CLONE_NEWNET)) {
		pr_err("Net namespace is required to dump tun link\n");
		return -1;
	}

	if (dump_one_reg_file(lfd, id, p))
		return -1;

	pr_info("Dumping tun-file %d with id %#x\n", lfd, id);
	tfe.id = id;
	ret = ioctl(lfd, TUNGETIFF, &ifr);
	if (ret < 0) {
		if (errno != EBADFD) {
			pr_perror("Can't dump tun-file device");
			return -1;
		}

		/*
		 * Otherwise this is just an opened file with no tun device
		 * attached yet. Go ahead and write the respective entry.
		 */
	} else {
		tfe.netdev = ifr.ifr_name;
		pr_info("`- attached to device %s (flags %x)\n", tfe.netdev, ifr.ifr_flags);

		if (ifr.ifr_flags & IFF_DETACH_QUEUE) {
			tfe.has_detached = true;
			tfe.detached = true;
		}

		if (dump_tun_link_fd(lfd, tfe.netdev, ifr.ifr_flags) == NULL)
			return -1;
	}

	img = img_from_set(glob_imgset, CR_FD_TUNFILE);
	return pb_write_one(img, &tfe, PB_TUNFILE);
}
static int dump_one_signalfd(int lfd, u32 id, const struct fd_parms *p)
{
	SignalfdEntry sfd = SIGNALFD_ENTRY__INIT;
	FileEntry fe = FILE_ENTRY__INIT;

	if (parse_fdinfo(lfd, FD_TYPES__SIGNALFD, &sfd))
		return -1;

	sfd.id = id;
	sfd.flags = p->flags;
	sfd.fown = (FownEntry *)&p->fown;

	fe.type = FD_TYPES__SIGNALFD;
	fe.id = sfd.id;
	fe.sgfd = &sfd;

	return pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
}
int dump_one_ns_file(int lfd, u32 id, const struct fd_parms *p)
{
	struct cr_img *img = img_from_set(glob_imgset, CR_FD_NS_FILES);
	NsFileEntry nfe = NS_FILE_ENTRY__INIT;
	struct fd_link *link = p->link;
	struct ns_id *nsid;

	nsid = lookup_ns_by_kid(link->ns_kid, link->ns_d);
	if (!nsid) {
		pr_err("No NS ID with kid %u\n", link->ns_kid);
		return -1;
	}

	nfe.id = id;
	nfe.ns_id = nsid->id;
	nfe.ns_cflag = link->ns_d->cflag;
	nfe.flags = p->flags;

	return pb_write_one(img, &nfe, PB_NS_FILE);
}
static int dump_seccomp_filters(void)
{
	SeccompEntry se = SECCOMP_ENTRY__INIT;
	int ret = -1, i;

	/* If we didn't collect any filters, don't create a seccomp image at all. */
	if (next_filter_id == 0)
		return 0;

	se.seccomp_filters = xzalloc(sizeof(*se.seccomp_filters) * next_filter_id);
	if (!se.seccomp_filters)
		return -1;

	se.n_seccomp_filters = next_filter_id;

	for (i = 0; i < next_filter_id; i++) {
		SeccompFilter *sf;
		struct seccomp_info *cur = filters[i];

		sf = se.seccomp_filters[cur->id] = &cur->filter;

		if (cur->prev) {
			sf->has_prev = true;
			sf->prev = cur->prev->id;
		}
	}

	ret = pb_write_one(img_from_set(glob_imgset, CR_FD_SECCOMP), &se, PB_SECCOMP);

	xfree(se.seccomp_filters);

	for (i = 0; i < next_filter_id; i++) {
		struct seccomp_info *freeme = filters[i];

		xfree(freeme->filter.filter.data);
		xfree(freeme);
	}
	xfree(filters);

	return ret;
}
static int dump_one_netlink_fd(int lfd, u32 id, const struct fd_parms *p)
{
	struct netlink_sk_desc *sk;
	NetlinkSkEntry ne = NETLINK_SK_ENTRY__INIT;
	SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;

	sk = (struct netlink_sk_desc *)lookup_socket(p->stat.st_ino, PF_NETLINK, 0);
	if (IS_ERR(sk))
		goto err;

	ne.id = id;
	ne.ino = p->stat.st_ino;

	if (!can_dump_netlink_sk(lfd))
		goto err;

	if (sk) {
		BUG_ON(sk->sd.already_dumped);

		ne.protocol = sk->protocol;
		ne.portid = sk->portid;
		ne.groups = sk->groups;

		ne.n_groups = sk->gsize / sizeof(ne.groups[0]);
		/*
		 * On 64-bit, sk->gsize is a multiple of 8 bytes (sizeof(long)),
		 * so remove the last 4 bytes if they are empty.
		 */
		if (ne.n_groups && sk->groups[ne.n_groups - 1] == 0)
			ne.n_groups -= 1;

		if (ne.n_groups > 1) {
			pr_err("%d %x\n", sk->gsize, sk->groups[1]);
			pr_err("The netlink socket 0x%x has more than 32 groups\n", ne.ino);
			return -1;
		}
		if (sk->groups && !sk->portid) {
			pr_err("The netlink socket 0x%x is bound to groups but not to portid\n", ne.ino);
			return -1;
		}
		ne.state = sk->state;
		ne.dst_portid = sk->dst_portid;
		ne.dst_group = sk->dst_group;
	} else { /* unconnected and unbound socket */
		int val;
		socklen_t aux = sizeof(val);

		if (getsockopt(lfd, SOL_SOCKET, SO_PROTOCOL, &val, &aux) < 0) {
			pr_perror("Unable to get protocol for netlink socket");
			goto err;
		}

		ne.protocol = val;
	}

	ne.fown = (FownEntry *)&p->fown;
	ne.opts = &skopts;

	if (dump_socket_opts(lfd, &skopts))
		goto err;

	if (pb_write_one(img_from_set(glob_imgset, CR_FD_NETLINK_SK), &ne, PB_NETLINK_SK))
		goto err;

	return 0;
err:
	return -1;
}
static int dump_one_netlink_fd(int lfd, u32 id, const struct fd_parms *p)
{
	struct netlink_sk_desc *sk;
	FileEntry fe = FILE_ENTRY__INIT;
	NetlinkSkEntry ne = NETLINK_SK_ENTRY__INIT;
	SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;

	sk = (struct netlink_sk_desc *)lookup_socket(p->stat.st_ino, PF_NETLINK, 0);
	if (IS_ERR(sk))
		goto err;

	ne.id = id;
	ne.ino = p->stat.st_ino;

	if (!can_dump_netlink_sk(lfd))
		goto err;

	if (sk) {
		BUG_ON(sk->sd.already_dumped);

		ne.protocol = sk->protocol;
		ne.portid = sk->portid;
		ne.groups = sk->groups;

		ne.n_groups = sk->gsize / sizeof(ne.groups[0]);
		/*
		 * On 64-bit, sk->gsize is a multiple of 8 bytes (sizeof(long)),
		 * so remove the last 4 bytes if they are empty.
		 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		/*
		 * Big endian swap: Ugly hack for zdtm/static/sk-netlink
		 *
		 * For big endian systems:
		 *
		 * - sk->groups[0] are bits 32-64
		 * - sk->groups[1] are bits 0-32
		 */
		if (ne.n_groups == 2) {
			uint32_t tmp = sk->groups[1];

			sk->groups[1] = sk->groups[0];
			sk->groups[0] = tmp;
		}
#endif
		if (ne.n_groups && sk->groups[ne.n_groups - 1] == 0)
			ne.n_groups -= 1;

		if (ne.n_groups > 1) {
			pr_err("%d %x\n", sk->gsize, sk->groups[1]);
			pr_err("The netlink socket 0x%x has more than 32 groups\n", ne.ino);
			return -1;
		}
		if (sk->groups && !sk->portid) {
			pr_err("The netlink socket 0x%x is bound to groups but not to portid\n", ne.ino);
			return -1;
		}
		ne.state = sk->state;
		ne.dst_portid = sk->dst_portid;
		ne.dst_group = sk->dst_group;
	} else { /* unconnected and unbound socket */
		int val;
		socklen_t aux = sizeof(val);

		if (getsockopt(lfd, SOL_SOCKET, SO_PROTOCOL, &val, &aux) < 0) {
			pr_perror("Unable to get protocol for netlink socket");
			goto err;
		}

		ne.protocol = val;
	}

	ne.fown = (FownEntry *)&p->fown;
	ne.opts = &skopts;

	if (dump_socket_opts(lfd, &skopts))
		goto err;

	fe.type = FD_TYPES__NETLINKSK;
	fe.id = ne.id;
	fe.nlsk = &ne;

	if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE))
		goto err;

	return 0;
err:
	return -1;
}