int parasite_dump_sigacts_seized(struct parasite_ctl *ctl, struct cr_fdset *cr_fdset)
{
	struct parasite_dump_sa_args *args;
	int ret, sig, fd;
	SaEntry se = SA_ENTRY__INIT;

	args = parasite_args(ctl, struct parasite_dump_sa_args);

	ret = parasite_execute(PARASITE_CMD_DUMP_SIGACTS, ctl);
	if (ret < 0)
		return ret;

	fd = fdset_fd(cr_fdset, CR_FD_SIGACT);

	for (sig = 1; sig <= SIGMAX; sig++) {
		int i = sig - 1;

		if (sig == SIGSTOP || sig == SIGKILL)
			continue;

		ASSIGN_TYPED(se.sigaction, encode_pointer(args->sas[i].rt_sa_handler));
		ASSIGN_TYPED(se.flags, args->sas[i].rt_sa_flags);
		ASSIGN_TYPED(se.restorer, encode_pointer(args->sas[i].rt_sa_restorer));
		ASSIGN_TYPED(se.mask, args->sas[i].rt_sa_mask.sig[0]);

		if (pb_write_one(fd, &se, PB_SIGACT) < 0)
			return -1;
	}

	return 0;
}
void write_stats(int what)
{
	StatsEntry stats = STATS_ENTRY__INIT;
	DumpStatsEntry dstats = DUMP_STATS_ENTRY__INIT;
	char *name;
	int fd;

	pr_info("Writing stats\n");
	if (what == DUMP_STATS) {
		stats.dump = &dstats;

		encode_time(TIME_FREEZING, &dstats.freezing_time);
		encode_time(TIME_FROZEN, &dstats.frozen_time);
		encode_time(TIME_MEMDUMP, &dstats.memdump_time);
		encode_time(TIME_MEMWRITE, &dstats.memwrite_time);

		dstats.pages_scanned = counts[CNT_PAGES_SCANNED];
		dstats.pages_skipped_parent = counts[CNT_PAGES_SKIPPED_PARENT];
		dstats.pages_written = counts[CNT_PAGES_WRITTEN];

		name = "dump";
	} else
		return;

	fd = open_image(CR_FD_STATS, O_DUMP, name);
	if (fd >= 0) {
		pb_write_one(fd, &stats, PB_STATS);
		close(fd);
	}
}
static int dump_one_packet_fd(int lfd, u32 id, const struct fd_parms *p)
{
	PacketSockEntry psk = PACKET_SOCK_ENTRY__INIT;
	SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;
	struct packet_sock_desc *sd;
	int i, ret;

	sd = (struct packet_sock_desc *)lookup_socket(p->stat.st_ino, PF_PACKET, 0);
	if (IS_ERR_OR_NULL(sd)) {
		pr_err("Can't find packet socket %"PRIu64"\n", p->stat.st_ino);
		return -1;
	}

	pr_info("Dumping packet socket fd %d id %#x\n", lfd, id);
	BUG_ON(sd->sd.already_dumped);
	sd->sd.already_dumped = 1;

	psk.id = sd->file_id = id;
	psk.type = sd->type;
	psk.flags = p->flags;
	psk.fown = (FownEntry *)&p->fown;
	psk.opts = &skopts;

	if (dump_socket_opts(lfd, &skopts))
		return -1;

	psk.protocol = sd->proto;
	psk.ifindex = sd->nli.pdi_index;
	psk.version = sd->nli.pdi_version;
	psk.reserve = sd->nli.pdi_reserve;
	psk.timestamp = sd->nli.pdi_tstamp;
	psk.copy_thresh = sd->nli.pdi_copy_thresh;
	psk.aux_data = (sd->nli.pdi_flags & PDI_AUXDATA ? true : false);
	psk.orig_dev = (sd->nli.pdi_flags & PDI_ORIGDEV ? true : false);
	psk.vnet_hdr = (sd->nli.pdi_flags & PDI_VNETHDR ? true : false);
	psk.loss = (sd->nli.pdi_flags & PDI_LOSS ? true : false);

	ret = dump_mreqs(&psk, sd);
	if (ret)
		goto out;

	if (sd->fanout != NO_FANOUT) {
		psk.has_fanout = true;
		psk.fanout = sd->fanout;
	}

	ret = dump_rings(&psk, sd);
	if (ret)
		goto out;

	ret = pb_write_one(img_from_set(glob_imgset, CR_FD_PACKETSK), &psk, PB_PACKET_SOCK);
out:
	release_skopts(&skopts);
	xfree(psk.rx_ring);
	xfree(psk.tx_ring);
	for (i = 0; i < psk.n_mclist; i++)
		xfree(psk.mclist[i]->addr.data);
	xfree(psk.mclist);
	return ret;
}
int dump_uts_ns(int ns_id)
{
	int ret;
	struct cr_img *img;
	struct utsname ubuf;
	UtsnsEntry ue = UTSNS_ENTRY__INIT;

	img = open_image(CR_FD_UTSNS, O_DUMP, ns_id);
	if (!img)
		return -1;

	ret = uname(&ubuf);
	if (ret < 0) {
		pr_perror("Error calling uname");
		goto err;
	}

	ue.nodename = ubuf.nodename;
	ue.domainname = ubuf.domainname;
	ret = pb_write_one(img, &ue, PB_UTSNS);
err:
	close_image(img);
	return ret < 0 ? -1 : 0;
}
int write_img_inventory(void)
{
	int fd;
	InventoryEntry he = INVENTORY_ENTRY__INIT;
	struct pstree_item crt = { };

	pr_info("Writing image inventory (version %u)\n", CRTOOLS_IMAGES_V1);

	fd = open_image(CR_FD_INVENTORY, O_DUMP);
	if (fd < 0)
		return -1;

	he.img_version = CRTOOLS_IMAGES_V1;
	he.fdinfo_per_id = true;
	he.has_fdinfo_per_id = true;
	he.ns_per_id = true;
	he.has_ns_per_id = true;

	crt.state = TASK_ALIVE;
	crt.pid.real = getpid();
	if (get_task_ids(&crt)) {
		close(fd);
		return -1;
	}

	he.root_ids = crt.ids;

	if (pb_write_one(fd, &he, PB_INVENTORY) < 0)
		return -1;

	xfree(crt.ids);
	close(fd);
	return 0;
}
int cpu_dump_cpuinfo(void)
{
	CpuinfoEntry cpu_info = CPUINFO_ENTRY__INIT;
	CpuinfoX86Entry cpu_x86_info = CPUINFO_X86_ENTRY__INIT;
	CpuinfoX86Entry *cpu_x86_info_ptr = &cpu_x86_info;
	struct cr_img *img;

	img = open_image(CR_FD_CPUINFO, O_DUMP);
	if (!img)
		return -1;

	cpu_info.x86_entry = &cpu_x86_info_ptr;
	cpu_info.n_x86_entry = 1;

	cpu_x86_info.vendor_id = (rt_cpu_info.x86_vendor == X86_VENDOR_INTEL) ?
		CPUINFO_X86_ENTRY__VENDOR__INTEL :
		CPUINFO_X86_ENTRY__VENDOR__AMD;
	cpu_x86_info.cpu_family = rt_cpu_info.x86_family;
	cpu_x86_info.model = rt_cpu_info.x86_model;
	cpu_x86_info.stepping = rt_cpu_info.x86_mask;
	cpu_x86_info.capability_ver = 1;
	cpu_x86_info.n_capability = ARRAY_SIZE(rt_cpu_info.x86_capability);
	cpu_x86_info.capability = (void *)rt_cpu_info.x86_capability;

	if (rt_cpu_info.x86_model_id[0])
		cpu_x86_info.model_id = rt_cpu_info.x86_model_id;

	if (pb_write_one(img, &cpu_info, PB_CPUINFO) < 0) {
		close_image(img);
		return -1;
	}

	close_image(img);
	return 0;
}
static int dump_ipc_var(struct cr_img *img)
{
	IpcVarEntry var = IPC_VAR_ENTRY__INIT;
	int ret = -1;

	var.n_sem_ctls = 4;
	var.sem_ctls = xmalloc(pb_repeated_size(&var, sem_ctls));
	if (!var.sem_ctls)
		goto err;

	ret = ipc_sysctl_req(&var, CTL_READ);
	if (ret < 0) {
		pr_err("Failed to read IPC variables\n");
		goto err;
	}

	ret = pb_write_one(img, &var, PB_IPC_VAR);
	if (ret < 0) {
		pr_err("Failed to write IPC variables\n");
		goto err;
	}
err:
	xfree(var.sem_ctls);
	return ret;
}
static int dump_ipc_msg_queue_messages(int fd, const IpcMsgEntry *msq, unsigned int msg_nr)
{
	struct msgbuf *message = NULL;
	unsigned int msgmax;
	int ret, msg_cnt = 0;
	struct sysctl_req req[] = {
		{ "kernel/msgmax", &msgmax, CTL_U32 },
		{ },
	};

	ret = sysctl_op(req, CTL_READ);
	if (ret < 0) {
		pr_err("Failed to read max IPC message size\n");
		goto err;
	}

	msgmax += sizeof(struct msgbuf);
	message = xmalloc(round_up(msgmax, sizeof(u64)));
	if (message == NULL) {
		pr_err("Failed to allocate memory for IPC message\n");
		return -ENOMEM;
	}

	for (msg_cnt = 0; msg_cnt < msg_nr; msg_cnt++) {
		IpcMsg msg = IPC_MSG__INIT;
		size_t rounded;

		/* MSG_COPY peeks the message at index msg_cnt without dequeueing it */
		ret = msgrcv(msq->desc->id, message, msgmax, msg_cnt, IPC_NOWAIT | MSG_COPY);
		if (ret < 0) {
			pr_perror("Failed to copy IPC message");
			goto err;
		}

		msg.msize = ret;
		msg.mtype = message->mtype;

		pr_info_ipc_msg(msg_cnt, &msg);

		ret = pb_write_one(fd, &msg, PB_IPCNS_MSG);
		if (ret < 0) {
			pr_err("Failed to write IPC message header\n");
			goto err;
		}

		rounded = round_up(msg.msize, sizeof(u64));
		memzero(((void *)message->mtext + msg.msize), rounded - msg.msize);
		ret = write_img_buf(fd, message->mtext, rounded);
		if (ret < 0) {
			pr_err("Failed to write IPC message data\n");
			goto err;
		}
	}
	ret = 0;
err:
	xfree(message);
	return ret;
}
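The MSG_COPY peek above is what makes non-destructive queue dumping possible: with MSG_COPY set (together with IPC_NOWAIT), msgrcv(2) reinterprets msgtyp as an index into the queue and copies the message out without removing it (Linux >= 3.8 built with CONFIG_CHECKPOINT_RESTORE). A minimal standalone sketch of that peek, independent of CRIU's helpers; peek_msg and the buffer size are illustrative:

/*
 * Minimal sketch of the MSG_COPY peek (assumes Linux >= 3.8 with
 * CONFIG_CHECKPOINT_RESTORE). With MSG_COPY, msgtyp is an index into
 * the queue and the message is copied out, not dequeued.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/msg.h>

static int peek_msg(int msqid, int index)
{
	struct {
		long mtype;
		char mtext[8192];	/* illustrative upper bound */
	} buf;
	ssize_t len;

	len = msgrcv(msqid, &buf, sizeof(buf.mtext), index,
		     IPC_NOWAIT | MSG_COPY);
	if (len < 0) {
		perror("msgrcv(MSG_COPY)");
		return -1;
	}

	printf("msg %d: type %ld, %zd bytes\n", index, buf.mtype, len);
	return 0;
}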
static int dump_one_timer(struct itimerval *v, int fd)
{
	ItimerEntry ie = ITIMER_ENTRY__INIT;

	ie.isec = v->it_interval.tv_sec;
	ie.iusec = v->it_interval.tv_usec;
	ie.vsec = v->it_value.tv_sec;
	ie.vusec = v->it_value.tv_usec;

	return pb_write_one(fd, &ie, PB_ITIMERS);
}
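For context, the struct itimerval pairs serialized here are what getitimer(2) reports for the three per-process interval timers. A hypothetical caller sketch (dump_all_timers is made up for illustration; CRIU collects these values from inside the dumpee via its parasite code rather than with a direct getitimer call):

/*
 * Illustrative only: read the calling process's three interval timers
 * and hand each to dump_one_timer(). Not how CRIU reaches another
 * task's timers.
 */
#include <sys/time.h>

static int dump_all_timers(int fd)
{
	static const int which[] = { ITIMER_REAL, ITIMER_VIRTUAL, ITIMER_PROF };
	struct itimerval v;
	unsigned int i;

	for (i = 0; i < sizeof(which) / sizeof(which[0]); i++) {
		if (getitimer(which[i], &v) < 0)
			return -1;
		if (dump_one_timer(&v, fd) < 0)
			return -1;
	}
	return 0;
}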
void write_stats(int what)
{
	StatsEntry stats = STATS_ENTRY__INIT;
	DumpStatsEntry ds_entry = DUMP_STATS_ENTRY__INIT;
	RestoreStatsEntry rs_entry = RESTORE_STATS_ENTRY__INIT;
	char *name;
	struct cr_img *img;

	pr_info("Writing stats\n");
	if (what == DUMP_STATS) {
		stats.dump = &ds_entry;

		encode_time(TIME_FREEZING, &ds_entry.freezing_time);
		encode_time(TIME_FROZEN, &ds_entry.frozen_time);
		encode_time(TIME_MEMDUMP, &ds_entry.memdump_time);
		encode_time(TIME_MEMWRITE, &ds_entry.memwrite_time);

		ds_entry.has_irmap_resolve = true;
		encode_time(TIME_IRMAP_RESOLVE, &ds_entry.irmap_resolve);

		ds_entry.pages_scanned = dstats->counts[CNT_PAGES_SCANNED];
		ds_entry.pages_skipped_parent = dstats->counts[CNT_PAGES_SKIPPED_PARENT];
		ds_entry.pages_written = dstats->counts[CNT_PAGES_WRITTEN];
		ds_entry.pages_zero = dstats->counts[CNT_PAGES_ZERO];
		ds_entry.pages_lazy = dstats->counts[CNT_PAGES_LAZY];

		name = "dump";
	} else if (what == RESTORE_STATS) {
		stats.restore = &rs_entry;

		rs_entry.pages_compared = atomic_read(&rstats->counts[CNT_PAGES_COMPARED]);
		rs_entry.pages_skipped_cow = atomic_read(&rstats->counts[CNT_PAGES_SKIPPED_COW]);
		rs_entry.has_pages_restored = true;
		rs_entry.pages_restored = atomic_read(&rstats->counts[CNT_PAGES_RESTORED]);

		encode_time(TIME_FORK, &rs_entry.forking_time);
		encode_time(TIME_RESTORE, &rs_entry.restore_time);

		name = "restore";
	} else
		return;

	img = open_image_at(AT_FDCWD, CR_FD_STATS, O_DUMP, name);
	if (img) {
		pb_write_one(img, &stats, PB_STATS);
		close_image(img);
	}
}
int write_img_inventory(InventoryEntry *he)
{
	struct cr_img *img;

	pr_info("Writing image inventory (version %u)\n", CRTOOLS_IMAGES_V1);

	img = open_image(CR_FD_INVENTORY, O_DUMP);
	if (!img)
		return -1;

	if (pb_write_one(img, he, PB_INVENTORY) < 0)
		return -1;

	xfree(he->root_ids);
	close_image(img);
	return 0;
}
struct cr_img *open_pages_image_at(int dfd, unsigned long flags, struct cr_img *pmi, u32 *id)
{
	if (flags == O_RDONLY || flags == O_RDWR) {
		PagemapHead *h;

		if (pb_read_one(pmi, &h, PB_PAGEMAP_HEAD) < 0)
			return NULL;
		*id = h->pages_id;
		pagemap_head__free_unpacked(h, NULL);
	} else {
		PagemapHead h = PAGEMAP_HEAD__INIT;

		*id = h.pages_id = page_ids++;
		if (pb_write_one(pmi, &h, PB_PAGEMAP_HEAD) < 0)
			return NULL;
	}

	return open_image_at(dfd, CR_FD_PAGES, flags, *id);
}
static int dump_tunfile(int lfd, u32 id, const struct fd_parms *p)
{
	int ret;
	struct cr_img *img;
	TunfileEntry tfe = TUNFILE_ENTRY__INIT;
	struct ifreq ifr;

	if (!(root_ns_mask & CLONE_NEWNET)) {
		pr_err("Net namespace is required to dump tun link\n");
		return -1;
	}

	if (dump_one_reg_file(lfd, id, p))
		return -1;

	pr_info("Dumping tun-file %d with id %#x\n", lfd, id);

	tfe.id = id;
	ret = ioctl(lfd, TUNGETIFF, &ifr);
	if (ret < 0) {
		if (errno != EBADFD) {
			pr_perror("Can't dump tun-file device");
			return -1;
		}

		/*
		 * Otherwise this is just an opened file with no tun device
		 * attached to it yet. Go ahead and write the respective entry.
		 */
	} else {
		tfe.netdev = ifr.ifr_name;
		pr_info("`- attached to device %s (flags %x)\n", tfe.netdev, ifr.ifr_flags);

		if (ifr.ifr_flags & IFF_DETACH_QUEUE) {
			tfe.has_detached = true;
			tfe.detached = true;
		}

		if (dump_tun_link_fd(lfd, tfe.netdev, ifr.ifr_flags) == NULL)
			return -1;
	}

	img = img_from_set(glob_imgset, CR_FD_TUNFILE);
	return pb_write_one(img, &tfe, PB_TUNFILE);
}
static int dump_eventfd_entry(union fdinfo_entries *e, void *arg)
{
	struct eventfd_dump_arg *da = arg;

	if (da->dumped) {
		pr_err("Several counters in a file?\n");
		return -1;
	}

	da->dumped = true;

	e->efd.id = da->id;
	e->efd.flags = da->p->flags;
	e->efd.fown = (FownEntry *)&da->p->fown;

	pr_info_eventfd("Dumping ", &e->efd);
	return pb_write_one(fdset_fd(glob_fdset, CR_FD_EVENTFD), &e->efd, PB_EVENTFD);
}
static int dump_ipc_shm_seg(struct cr_img *img, int id, const struct shmid_ds *ds)
{
	IpcShmEntry shm = IPC_SHM_ENTRY__INIT;
	IpcDescEntry desc = IPC_DESC_ENTRY__INIT;
	int ret;

	shm.desc = &desc;
	shm.size = ds->shm_segsz;
	fill_ipc_desc(id, shm.desc, &ds->shm_perm);
	pr_info_ipc_shm(&shm);

	ret = pb_write_one(img, &shm, PB_IPC_SHM);
	if (ret < 0) {
		pr_err("Failed to write IPC shared memory segment\n");
		return ret;
	}

	return dump_ipc_shm_pages(img, &shm);
}
static int dump_one_signalfd(int lfd, u32 id, const struct fd_parms *p)
{
	SignalfdEntry sfd = SIGNALFD_ENTRY__INIT;
	FileEntry fe = FILE_ENTRY__INIT;

	if (parse_fdinfo(lfd, FD_TYPES__SIGNALFD, &sfd))
		return -1;

	sfd.id = id;
	sfd.flags = p->flags;
	sfd.fown = (FownEntry *)&p->fown;

	fe.type = FD_TYPES__SIGNALFD;
	fe.id = sfd.id;
	fe.sgfd = &sfd;

	return pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
}
int open_pages_image_at(int dfd, unsigned long flags, int pm_fd)
{
	unsigned id;

	if (flags == O_RDONLY || flags == O_RDWR) {
		PagemapHead *h;

		if (pb_read_one(pm_fd, &h, PB_PAGEMAP_HEAD) < 0)
			return -1;
		id = h->pages_id;
		pagemap_head__free_unpacked(h, NULL);
	} else {
		PagemapHead h = PAGEMAP_HEAD__INIT;

		id = h.pages_id = page_ids++;
		if (pb_write_one(pm_fd, &h, PB_PAGEMAP_HEAD) < 0)
			return -1;
	}

	return open_image_at(dfd, CR_FD_PAGES, flags, id);
}
static int dump_ipc_sem_desc(struct cr_img *img, int id, const struct semid_ds *ds)
{
	IpcSemEntry sem = IPC_SEM_ENTRY__INIT;
	IpcDescEntry desc = IPC_DESC_ENTRY__INIT;
	int ret;

	sem.desc = &desc;
	sem.nsems = ds->sem_nsems;

	fill_ipc_desc(id, sem.desc, &ds->sem_perm);
	pr_info_ipc_sem_entry(&sem);

	ret = pb_write_one(img, &sem, PB_IPC_SEM);
	if (ret < 0) {
		pr_err("Failed to write IPC semaphores set\n");
		return ret;
	}

	return dump_ipc_sem_set(img, &sem);
}
int write_img_inventory(void)
{
	int fd;
	InventoryEntry he = INVENTORY_ENTRY__INIT;

	pr_info("Writing image inventory (version %u)\n", CRTOOLS_IMAGES_V1);

	fd = open_image(CR_FD_INVENTORY, O_DUMP);
	if (fd < 0)
		return -1;

	he.img_version = CRTOOLS_IMAGES_V1;

	if (pb_write_one(fd, &he, PB_INVENTORY) < 0)
		return -1;

	close(fd);
	return 0;
}
int write_img_inventory(void)
{
	struct cr_img *img;
	InventoryEntry he = INVENTORY_ENTRY__INIT;
	struct {
		struct pstree_item i;
		struct dmp_info d;
	} crt = { };

	pr_info("Writing image inventory (version %u)\n", CRTOOLS_IMAGES_V1);

	img = open_image(CR_FD_INVENTORY, O_DUMP);
	if (!img)
		return -1;

	he.img_version = CRTOOLS_IMAGES_V1_1;
	he.fdinfo_per_id = true;
	he.has_fdinfo_per_id = true;
	he.ns_per_id = true;
	he.has_ns_per_id = true;
	he.lsmtype = host_lsm_type();

	crt.i.state = TASK_ALIVE;
	crt.i.pid.real = getpid();
	if (get_task_ids(&crt.i)) {
		close_image(img);
		return -1;
	}

	he.has_root_cg_set = true;
	if (dump_task_cgroup(NULL, &he.root_cg_set))
		return -1;

	he.root_ids = crt.i.ids;

	if (pb_write_one(img, &he, PB_INVENTORY) < 0)
		return -1;

	xfree(crt.i.ids);
	close_image(img);
	return 0;
}
static int dump_ipc_msg_queue(struct cr_img *img, int id, const struct msqid_ds *ds)
{
	IpcMsgEntry msg = IPC_MSG_ENTRY__INIT;
	IpcDescEntry desc = IPC_DESC_ENTRY__INIT;
	int ret;

	msg.desc = &desc;
	fill_ipc_desc(id, msg.desc, &ds->msg_perm);
	msg.qbytes = ds->msg_qbytes;
	msg.qnum = ds->msg_qnum;
	pr_info_ipc_msg_entry(&msg);

	ret = pb_write_one(img, &msg, PB_IPCNS_MSG_ENT);
	if (ret < 0) {
		pr_err("Failed to write IPC message queue\n");
		return ret;
	}

	return dump_ipc_msg_queue_messages(img, &msg, ds->msg_qnum);
}
int dump_one_ns_file(int lfd, u32 id, const struct fd_parms *p)
{
	int fd = fdset_fd(glob_fdset, CR_FD_NS_FILES);
	NsFileEntry nfe = NS_FILE_ENTRY__INIT;
	struct fd_link *link = p->link;
	unsigned int nsid;

	nsid = lookup_ns_id(link->ns_kid, link->ns_d);
	if (!nsid) {
		pr_err("No NS ID with kid %u\n", link->ns_kid);
		return -1;
	}

	nfe.id = id;
	nfe.ns_id = nsid;
	nfe.ns_cflag = link->ns_d->cflag;
	nfe.flags = p->flags;

	return pb_write_one(fd, &nfe, PB_NS_FILE);
}
int dump_one_ns_file(int lfd, u32 id, const struct fd_parms *p)
{
	struct cr_img *img = img_from_set(glob_imgset, CR_FD_NS_FILES);
	NsFileEntry nfe = NS_FILE_ENTRY__INIT;
	struct fd_link *link = p->link;
	struct ns_id *nsid;

	nsid = lookup_ns_by_kid(link->ns_kid, link->ns_d);
	if (!nsid) {
		pr_err("No NS ID with kid %u\n", link->ns_kid);
		return -1;
	}

	nfe.id = id;
	nfe.ns_id = nsid->id;
	nfe.ns_cflag = link->ns_d->cflag;
	nfe.flags = p->flags;

	return pb_write_one(img, &nfe, PB_NS_FILE);
}
int dump_uts_ns(int ns_pid, struct cr_fdset *fdset)
{
	int ret;
	struct utsname ubuf;
	UtsnsEntry ue = UTSNS_ENTRY__INIT;

	ret = switch_ns(ns_pid, CLONE_NEWUTS, "uts", NULL);
	if (ret < 0)
		return ret;

	ret = uname(&ubuf);
	if (ret < 0) {
		pr_perror("Error calling uname");
		return ret;
	}

	ue.nodename = ubuf.nodename;
	ue.domainname = ubuf.domainname;

	return pb_write_one(fdset_fd(fdset, CR_FD_UTSNS), &ue, PB_UTSNS);
}
static int dump_seccomp_filters(void)
{
	SeccompEntry se = SECCOMP_ENTRY__INIT;
	int ret = -1, i;

	/* If we didn't collect any filters, don't create a seccomp image at all. */
	if (next_filter_id == 0)
		return 0;

	se.seccomp_filters = xzalloc(sizeof(*se.seccomp_filters) * next_filter_id);
	if (!se.seccomp_filters)
		return -1;

	se.n_seccomp_filters = next_filter_id;

	for (i = 0; i < next_filter_id; i++) {
		SeccompFilter *sf;
		struct seccomp_info *cur = filters[i];

		sf = se.seccomp_filters[cur->id] = &cur->filter;
		if (cur->prev) {
			sf->has_prev = true;
			sf->prev = cur->prev->id;
		}
	}

	ret = pb_write_one(img_from_set(glob_imgset, CR_FD_SECCOMP), &se, PB_SECCOMP);

	xfree(se.seccomp_filters);

	for (i = 0; i < next_filter_id; i++) {
		struct seccomp_info *freeme = filters[i];

		xfree(freeme->filter.filter.data);
		xfree(freeme);
	}
	xfree(filters);

	return ret;
}
static int copy_file_to_chunks(int fd, struct cr_img *img, size_t file_size)
{
	GhostChunkEntry ce = GHOST_CHUNK_ENTRY__INIT;
	off_t data, hole = 0;

	while (hole < file_size) {
		data = lseek(fd, hole, SEEK_DATA);
		if (data < 0) {
			if (errno == ENXIO)
				/* No data */
				break;
			else if (hole == 0) {
				/* No SEEK_HOLE/DATA by FS */
				data = 0;
				hole = file_size;
			} else {
				pr_perror("Can't seek file data");
				return -1;
			}
		} else {
			hole = lseek(fd, data, SEEK_HOLE);
			if (hole < 0) {
				pr_perror("Can't seek file hole");
				return -1;
			}
		}

		ce.len = hole - data;
		ce.off = data;

		if (pb_write_one(img, &ce, PB_GHOST_CHUNK))
			return -1;

		if (copy_chunk_from_file(fd, img_raw_fd(img), ce.off, ce.len))
			return -1;
	}

	return 0;
}
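The loop relies on lseek(2)'s SEEK_DATA/SEEK_HOLE pair: each pass finds the next data extent and the hole that ends it, so only populated ranges become chunks, and a filesystem without sparse-seek support degrades to a single chunk covering the whole file. A self-contained sketch of the same extent walk, with walk_extents as an illustrative name:

/*
 * Standalone sketch of the SEEK_DATA/SEEK_HOLE extent walk used above.
 * Needs _GNU_SOURCE; ENXIO from SEEK_DATA means no data past 'hole'.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int walk_extents(int fd, off_t file_size)
{
	off_t data, hole = 0;

	while (hole < file_size) {
		data = lseek(fd, hole, SEEK_DATA);
		if (data < 0) {
			if (errno == ENXIO)
				break;	/* no more data */
			perror("SEEK_DATA");
			return -1;
		}
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0) {
			perror("SEEK_HOLE");
			return -1;
		}
		printf("data extent: %jd..%jd\n", (intmax_t)data, (intmax_t)hole);
	}
	return 0;
}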
int cpu_dump_cpuinfo(void)
{
	CpuinfoEntry cpu_info = CPUINFO_ENTRY__INIT;
	CpuinfoPpc64Entry cpu_ppc64_info = CPUINFO_PPC64_ENTRY__INIT;
	CpuinfoPpc64Entry *cpu_ppc64_info_ptr = &cpu_ppc64_info;
	struct cr_img *img;
	int ret = -1;

	img = open_image(CR_FD_CPUINFO, O_DUMP);
	if (!img)
		return -1;

	cpu_info.ppc64_entry = &cpu_ppc64_info_ptr;
	cpu_info.n_ppc64_entry = 1;

	cpu_ppc64_info.endian = CURRENT_ENDIANNESS;
	cpu_ppc64_info.n_hwcap = 2;
	cpu_ppc64_info.hwcap = rt_cpuinfo.hwcap;

	ret = pb_write_one(img, &cpu_info, PB_CPUINFO);

	close_image(img);
	return ret;
}
static int dump_tcp_conn_state(struct inet_sk_desc *sk)
{
	int ret, img_fd, aux;
	TcpStreamEntry tse = TCP_STREAM_ENTRY__INIT;
	char *in_buf, *out_buf;

	/*
	 * Read queue
	 */
	pr_info("Reading inq for socket\n");
	tse.inq_len = sk->rqlen;
	ret = tcp_stream_get_queue(sk->rfd, TCP_RECV_QUEUE,
			&tse.inq_seq, tse.inq_len, &in_buf);
	if (ret < 0)
		goto err_in;

	/*
	 * Write queue
	 */
	pr_info("Reading outq for socket\n");
	tse.outq_len = sk->wqlen;
	tse.unsq_len = sk->uwqlen;
	tse.has_unsq_len = true;
	ret = tcp_stream_get_queue(sk->rfd, TCP_SEND_QUEUE,
			&tse.outq_seq, tse.outq_len, &out_buf);
	if (ret < 0)
		goto err_out;

	/*
	 * Initial options
	 */
	pr_info("Reading options for socket\n");
	ret = tcp_stream_get_options(sk->rfd, &tse);
	if (ret < 0)
		goto err_opt;

	/*
	 * TCP socket options
	 */
	ret = -1;
	if (dump_opt(sk->rfd, SOL_TCP, TCP_NODELAY, &aux))
		goto err_opt;
	if (aux) {
		tse.has_nodelay = true;
		tse.nodelay = true;
	}

	if (dump_opt(sk->rfd, SOL_TCP, TCP_CORK, &aux))
		goto err_opt;
	if (aux) {
		tse.has_cork = true;
		tse.cork = true;
	}

	/*
	 * Push the stuff to image
	 */
	img_fd = open_image(CR_FD_TCP_STREAM, O_DUMP, sk->sd.ino);
	if (img_fd < 0)
		goto err_img;

	ret = pb_write_one(img_fd, &tse, PB_TCP_STREAM);
	if (ret < 0)
		goto err_iw;

	if (in_buf) {
		ret = write_img_buf(img_fd, in_buf, tse.inq_len);
		if (ret < 0)
			goto err_iw;
	}

	if (out_buf) {
		ret = write_img_buf(img_fd, out_buf, tse.outq_len);
		if (ret < 0)
			goto err_iw;
	}

	pr_info("Done\n");
err_iw:
	close(img_fd);
err_img:
err_opt:
	xfree(out_buf);
err_out:
	xfree(in_buf);
err_in:
	return ret;
}
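Worth noting: TCP_RECV_QUEUE/TCP_SEND_QUEUE contents and the sequence numbers are only readable while the socket sits in repair mode (TCP_REPAIR, Linux >= 3.5, CAP_NET_ADMIN), which the caller is assumed to have entered before this function runs. A hedged sketch of that mode switch; the helper name is illustrative:

/*
 * Illustrative sketch: flip a TCP socket in and out of repair mode.
 * TCP_REPAIR needs Linux >= 3.5 and CAP_NET_ADMIN; the constant comes
 * from <netinet/tcp.h> on recent libcs (otherwise <linux/tcp.h>).
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int tcp_repair_set(int fd, int on)
{
	/*
	 * While repair mode is on, queue contents can be read after
	 * selecting a queue with TCP_REPAIR_QUEUE, and sequence numbers
	 * can be queried via TCP_QUEUE_SEQ.
	 */
	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
}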
static int dump_one_netlink_fd(int lfd, u32 id, const struct fd_parms *p)
{
	struct netlink_sk_desc *sk;
	FileEntry fe = FILE_ENTRY__INIT;
	NetlinkSkEntry ne = NETLINK_SK_ENTRY__INIT;
	SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;

	sk = (struct netlink_sk_desc *)lookup_socket(p->stat.st_ino, PF_NETLINK, 0);
	if (IS_ERR(sk))
		goto err;

	ne.id = id;
	ne.ino = p->stat.st_ino;

	if (!can_dump_netlink_sk(lfd))
		goto err;

	if (sk) {
		BUG_ON(sk->sd.already_dumped);

		ne.protocol = sk->protocol;
		ne.portid = sk->portid;
		ne.groups = sk->groups;

		ne.n_groups = sk->gsize / sizeof(ne.groups[0]);
		/*
		 * On 64-bit, sk->gsize is a multiple of 8 bytes (sizeof(long)),
		 * so drop the last 4 bytes if they are empty.
		 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		/*
		 * Big endian swap: Ugly hack for zdtm/static/sk-netlink
		 *
		 * For big endian systems:
		 *
		 * - sk->groups[0] are bits 32-64
		 * - sk->groups[1] are bits 0-32
		 */
		if (ne.n_groups == 2) {
			uint32_t tmp = sk->groups[1];

			sk->groups[1] = sk->groups[0];
			sk->groups[0] = tmp;
		}
#endif
		if (ne.n_groups && sk->groups[ne.n_groups - 1] == 0)
			ne.n_groups -= 1;

		if (ne.n_groups > 1) {
			pr_err("%d %x\n", sk->gsize, sk->groups[1]);
			pr_err("The netlink socket 0x%x has more than 32 groups\n", ne.ino);
			return -1;
		}

		if (sk->groups && !sk->portid) {
			pr_err("The netlink socket 0x%x is bound to groups but not to portid\n", ne.ino);
			return -1;
		}

		ne.state = sk->state;
		ne.dst_portid = sk->dst_portid;
		ne.dst_group = sk->dst_group;
	} else { /* unconnected and unbound socket */
		int val;
		socklen_t aux = sizeof(val);

		if (getsockopt(lfd, SOL_SOCKET, SO_PROTOCOL, &val, &aux) < 0) {
			pr_perror("Unable to get protocol for netlink socket");
			goto err;
		}

		ne.protocol = val;
	}

	ne.fown = (FownEntry *)&p->fown;
	ne.opts = &skopts;

	if (dump_socket_opts(lfd, &skopts))
		goto err;

	fe.type = FD_TYPES__NETLINKSK;
	fe.id = ne.id;
	fe.nlsk = &ne;

	if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE))
		goto err;

	return 0;
err:
	return -1;
}
static int dump_one_netlink_fd(int lfd, u32 id, const struct fd_parms *p)
{
	struct netlink_sk_desc *sk;
	NetlinkSkEntry ne = NETLINK_SK_ENTRY__INIT;
	SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;

	sk = (struct netlink_sk_desc *)lookup_socket(p->stat.st_ino, PF_NETLINK, 0);
	if (IS_ERR(sk))
		goto err;

	ne.id = id;
	ne.ino = p->stat.st_ino;

	if (!can_dump_netlink_sk(lfd))
		goto err;

	if (sk) {
		BUG_ON(sk->sd.already_dumped);

		ne.protocol = sk->protocol;
		ne.portid = sk->portid;
		ne.groups = sk->groups;

		ne.n_groups = sk->gsize / sizeof(ne.groups[0]);
		/*
		 * On 64-bit, sk->gsize is a multiple of 8 bytes (sizeof(long)),
		 * so drop the last 4 bytes if they are empty.
		 */
		if (ne.n_groups && sk->groups[ne.n_groups - 1] == 0)
			ne.n_groups -= 1;

		if (ne.n_groups > 1) {
			pr_err("%d %x\n", sk->gsize, sk->groups[1]);
			pr_err("The netlink socket 0x%x has more than 32 groups\n", ne.ino);
			return -1;
		}

		if (sk->groups && !sk->portid) {
			pr_err("The netlink socket 0x%x is bound to groups but not to portid\n", ne.ino);
			return -1;
		}

		ne.state = sk->state;
		ne.dst_portid = sk->dst_portid;
		ne.dst_group = sk->dst_group;
	} else { /* unconnected and unbound socket */
		int val;
		socklen_t aux = sizeof(val);

		if (getsockopt(lfd, SOL_SOCKET, SO_PROTOCOL, &val, &aux) < 0) {
			pr_perror("Unable to get protocol for netlink socket");
			goto err;
		}

		ne.protocol = val;
	}

	ne.fown = (FownEntry *)&p->fown;
	ne.opts = &skopts;

	if (dump_socket_opts(lfd, &skopts))
		goto err;

	if (pb_write_one(img_from_set(glob_imgset, CR_FD_NETLINK_SK), &ne, PB_NETLINK_SK))
		goto err;

	return 0;
err:
	return -1;
}