static int prepare_ipc_sem_values(struct cr_img *img, const IpcSemEntry *sem) { int ret, size; u16 *values; size = round_up(sizeof(u16) * sem->nsems, sizeof(u64)); values = xmalloc(size); if (values == NULL) { pr_err("Failed to allocate memory for semaphores set values\n"); ret = -ENOMEM; goto out; } ret = read_img_buf(img, values, size); if (ret < 0) { pr_err("Failed to allocate memory for semaphores set values\n"); ret = -ENOMEM; goto out; } pr_info_ipc_sem_array(sem->nsems, values); ret = semctl(sem->desc->id, 0, SETALL, values); if (ret < 0) { pr_perror("Failed to set semaphores set values"); ret = -errno; } out: xfree(values); return ret; }
/*
 * Push a previously dumped TCP queue back into socket @sk.
 * The queue contents (@len bytes) are read from image fd @imgfd and
 * written with send() in chunks bounded by the per-direction limit
 * (tcp_max_wshare for the send queue, tcp_max_rshare otherwise).
 *
 * Returns 0 on success, -1 on failure.
 */
static int __send_tcp_queue(int sk, int queue, u32 len, int imgfd)
{
	char *data;
	int limit, got;
	int sent = 0, rv = -1;

	data = xmalloc(len);
	if (!data)
		return -1;

	if (read_img_buf(imgfd, data, len) < 0)
		goto err;

	if (queue == TCP_SEND_QUEUE)
		limit = tcp_max_wshare;
	else
		limit = tcp_max_rshare;

	/* A partial send is treated as an error: repair-mode sockets
	 * are expected to swallow each chunk whole. */
	for (; len; len -= got, sent += got) {
		got = (len > limit) ? limit : len;

		if (send(sk, data + sent, got, 0) != got) {
			pr_perror("Can't restore %d queue data (%d), want (%d:%d)", queue, got, got, len);
			goto err;
		}
	}

	rv = 0;
err:
	xfree(data);
	return rv;
}
/*
 * Push a previously dumped TCP queue back into socket @sk, reading the
 * @len bytes of queue data from @img. Unlike the fd-based variant, this
 * version adaptively halves the chunk size when the kernel refuses a
 * send, to cope with large linear-skb allocations failing under memory
 * pressure.
 *
 * NOTE(review): this function appears truncated in the visible source —
 * the error label / cleanup / return are not shown here.
 */
static int __send_tcp_queue(int sk, int queue, u32 len, struct cr_img *img)
{
	int ret, err = -1, max_chunk;
	int off;
	char *buf;

	buf = xmalloc(len);
	if (!buf)
		return -1;

	if (read_img_buf(img, buf, len) < 0)
		goto err;

	/* Start optimistic: try to push everything in one send(). */
	max_chunk = len;
	off = 0;

	do {
		int chunk = len;

		if (chunk > max_chunk)
			chunk = max_chunk;

		ret = send(sk, buf + off, chunk, 0);
		if (ret <= 0) {
			if (max_chunk > 1024) {
				/*
				 * Kernel not only refuses the whole chunk,
				 * but refuses to split it into pieces too.
				 *
				 * When restoring recv queue in repair mode
				 * kernel doesn't try hard and just allocates
				 * a linear skb with the size we pass to the
				 * system call. Thus, if the size is too big
				 * for slab allocator, the send just fails
				 * with ENOMEM.
				 *
				 * In any case -- try smaller chunk, hopefully
				 * there's still enough memory in the system.
				 */
				max_chunk >>= 1;
				continue;
			}

			pr_perror("Can't restore %d queue data (%d), want (%d:%d:%d)", queue, ret, chunk, len, max_chunk);
			goto err;
		}
		/* Partial sends are fine here: advance by what was taken. */
		off += ret;
		len -= ret;
	} while (len);
/*
 * Image-show handler for an IPC semaphore set entry: read the
 * (u64-padded) u16 value array that follows the entry in the image
 * and pretty-print it. Best-effort — errors are silently ignored.
 */
void ipc_sem_handler(int fd, void *obj)
{
	IpcSemEntry *e = obj;
	u16 *values;
	int size;

	pr_msg("\n");
	/* Values are stored padded to a u64 boundary in the image. */
	size = round_up(sizeof(u16) * e->nsems, sizeof(u64));
	values = xmalloc(size);
	if (values == NULL)
		return;
	if (read_img_buf(fd, values, size) <= 0) {
		xfree(values);
		return;
	}
	pr_msg_ipc_sem_array(e->nsems, values);
	/* Fix: the buffer was previously leaked on the success path. */
	xfree(values);
}
static int prepare_ipc_msg_queue_messages(struct cr_img *img, const IpcMsgEntry *msq) { IpcMsg *msg = NULL; int msg_nr = 0; int ret = 0; while (msg_nr < msq->qnum) { struct msgbuf { long mtype; char mtext[MSGMAX]; } data; ret = pb_read_one(img, &msg, PB_IPCNS_MSG); if (ret <= 0) return -EIO; pr_info_ipc_msg(msg_nr, msg); if (msg->msize > MSGMAX) { ret = -1; pr_err("Unsupported message size: %d (MAX: %d)\n", msg->msize, MSGMAX); break; } ret = read_img_buf(img, data.mtext, round_up(msg->msize, sizeof(u64))); if (ret < 0) { pr_err("Failed to read IPC message data\n"); break; } data.mtype = msg->mtype; ret = msgsnd(msq->desc->id, &data, msg->msize, IPC_NOWAIT); if (ret < 0) { pr_perror("Failed to send IPC message"); ret = -errno; break; } msg_nr++; } if (msg) ipc_msg__free_unpacked(msg, NULL); return ret; }
/*
 * Read a @size-byte string from image @img into a freshly allocated,
 * NUL-terminated buffer, which is handed to the caller via *@pstr
 * (caller owns and must xfree() it).
 *
 * Returns 0 on success, -1 on allocation or read failure.
 */
int read_img_str(struct cr_img *img, char **pstr, int size)
{
	char *buf = xmalloc(size + 1);

	if (!buf)
		return -1;

	if (read_img_buf(img, buf, size) < 0) {
		xfree(buf);
		return -1;
	}

	buf[size] = '\0';
	*pstr = buf;
	return 0;
}
/*
 * Restore the contents of the IPC shared memory segment described by
 * @shm: attach it, read the (u32-padded) data from @img and detach.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int prepare_ipc_shm_pages(struct cr_img *img, const IpcShmEntry *shm)
{
	int ret;
	void *data;

	data = shmat(shm->desc->id, NULL, 0);
	if (data == (void *)-1) {
		pr_perror("Failed to attach IPC shared memory");
		return -errno;
	}

	/* Data is stored padded to a u32 boundary in the image. */
	ret = read_img_buf(img, data, round_up(shm->size, sizeof(u32)));
	if (ret < 0) {
		pr_err("Failed to read IPC shared memory data\n");
		/*
		 * Fix: previously returned with the segment still attached,
		 * leaking the mapping. Best-effort detach; keep the read
		 * error as the reported failure.
		 */
		shmdt(data);
		return ret;
	}

	if (shmdt(data)) {
		pr_perror("Failed to detach IPC shared memory");
		return -errno;
	}

	return 0;
}