/* Function used by daemon to munmap a sysv resource. */
static int
munmap_seg(int shmid, void *addr) {
	struct shmid_ds *shmseg;
	struct shm_handle *internal;

	size_t size;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (!shmseg) {
		sysvd_print_err("munmap_seg error:"
				"semid %d not found\n", shmid);
		return (-1);
	}

	internal = (struct shm_handle *)shmseg->shm_internal;
	if (!internal) {
		sysvd_print_err("munmap_seg error: internal for"
				"semid %d not found\n", shmid);
		return (-1);
	}

	size = round_page(shmseg->shm_segsz);
	munmap(addr, size);

	return (0);
}
/* Function used by daemon to map a sysv resource. */
static void *
map_seg(int shmid) {
	struct shmid_ds *shmseg;
	struct shm_handle *internal;

	int fd;
	size_t size;
	void *addr;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (!shmseg) {
		sysvd_print_err("map_seg error:"
				"semid %d not found\n", shmid);
		return (NULL);
	}

	internal = (struct shm_handle *)shmseg->shm_internal;
	if (!internal) {
		sysvd_print_err("map_seg error: internal for"
				"semid %d not found\n", shmid);
		return (NULL);
	}

	fd = internal->fd;

	size = round_page(shmseg->shm_segsz);

	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (!addr) {
		sysvd_print_err("map_seg: error mmap semid = %d\n", shmid);
		return (NULL);
	}

	return (addr);
}
/* Handle a shmget() request.
 * On success the return value is the shmid of the existing or newly
 * allocated segment; on failure it is an error code.
 * NOTE(review): the ENOENT case is returned negated (-ENOENT) while
 * other paths appear to return positive values -- confirm what the
 * caller expects.
 */
int
handle_shmget(pid_t pid, struct shmget_msg *shmget_msg,
		struct cmsgcred *cred ) {
	int segnum, mode, error;
	struct shmid_ds *shmseg;
	struct shm_handle *handle;

	//if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
	//	return (ENOSYS);
	/* Keep only the permission bits of the request flags. */
	mode = shmget_msg->shmflg & ACCESSPERMS;

	sysvd_print("ask for key = %ld\n", shmget_msg->key);
	/* Fold the resource type into the top bits of the key so that
	 * different sysv resource kinds sharing the same user key do not
	 * collide in the segment namespace. */
	shmget_msg->key = (shmget_msg->key & 0x3FFF) |
		(shmget_msg->type << 30);
	sysvd_print("ask for key = %ld\n", shmget_msg->key);

	if (shmget_msg->key != IPC_PRIVATE) {
		//again:
		segnum = shm_find_segment_by_key(shmget_msg->key);
		if (segnum >= 0) {
			/* Key already exists: validate flags/permissions
			 * against the existing segment. */
			error = shmget_existing(shmget_msg, mode, segnum, cred);
			//TODO if daemon is multithreading
			//if (error == EAGAIN)
			//	goto again;
			goto done;
		}
		if ((shmget_msg->shmflg & IPC_CREAT) == 0) {
			error = -ENOENT;
			goto done_err;
		}
	}
	/* IPC_PRIVATE key, or new key with IPC_CREAT: allocate. */
	error = shmget_allocate_segment(pid, shmget_msg, mode, cred);
	sysvd_print("allocate segment = %d\n", error);
done:
	/*
	 * Install to the client the file corresponding to the
	 * shared memory segment.
	 * client_fd is the file descriptor added in the client
	 * files table.
	 * At this point 'error' holds the shmid produced above.
	 */
	shmseg = shm_find_segment_by_shmid(error);
	if (shmseg == NULL) {
		sysvd_print_err("can not find segment by shmid\n");
		return (-1);
	}

	handle = (struct shm_handle *)shmseg->shm_internal;
	if (install_fd_client(pid, handle->fd) != 0)
		error = errno;
done_err:
	return (error);

}
Example #4
0
/*
 * Common shmctl(2) implementation: look up the segment, check the
 * caller's permission for the requested command, then perform it.
 * The ds_copyin/ds_copyout callbacks let the caller decide how
 * shmid_ds data crosses the user/kernel boundary.
 * Returns 0 on success or an errno value.
 */
int
shmctl1(struct proc *p, int shmid, int cmd, caddr_t buf,
    int (*ds_copyin)(const void *, void *, size_t),
    int (*ds_copyout)(const void *, void *, size_t))
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds tmp, *seg;
	int rv;

	seg = shm_find_segment_by_shmid(shmid);
	if (seg == NULL)
		return (EINVAL);

	if (cmd == IPC_STAT) {
		/* Reading the descriptor requires read permission. */
		rv = ipcperm(cred, &seg->shm_perm, IPC_R);
		if (rv != 0)
			return (rv);
		rv = ds_copyout(seg, buf, sizeof(tmp));
		if (rv != 0)
			return (rv);
	} else if (cmd == IPC_SET) {
		/* Changing ownership/mode requires modify permission. */
		rv = ipcperm(cred, &seg->shm_perm, IPC_M);
		if (rv != 0)
			return (rv);
		rv = ds_copyin(buf, &tmp, sizeof(tmp));
		if (rv != 0)
			return (rv);
		seg->shm_perm.uid = tmp.shm_perm.uid;
		seg->shm_perm.gid = tmp.shm_perm.gid;
		/* Only the access bits are user-settable. */
		seg->shm_perm.mode =
		    (seg->shm_perm.mode & ~ACCESSPERMS) |
		    (tmp.shm_perm.mode & ACCESSPERMS);
		seg->shm_ctime = time_second;
	} else if (cmd == IPC_RMID) {
		rv = ipcperm(cred, &seg->shm_perm, IPC_M);
		if (rv != 0)
			return (rv);
		/* Mark removed; the segment is torn down now only if no
		 * process still has it attached. */
		seg->shm_perm.key = IPC_PRIVATE;
		seg->shm_perm.mode |= SHMSEG_REMOVED;
		if (seg->shm_nattch <= 0) {
			shm_deallocate_segment(seg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
	} else {
		/* SHM_LOCK/SHM_UNLOCK are unsupported, as is anything
		 * else. */
		return (EINVAL);
	}
	return (0);
}
Example #5
0
		MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK | M_NULL);
		if (shmmap_s == NULL) {
			shmat_ret = ENOMEM;
			goto shmat_out;
		}

		/* initialize the entries */
		for (i = 0; i < nsegs; i++) {
			shmmap_s[i].shmid = SHMID_UNALLOCATED;
		}
		shmmap_s[i].shmid = SHMID_SENTINEL;

		p->vm_shm = (caddr_t)shmmap_s;
	}

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		shmat_ret = EINVAL;
		goto shmat_out;
	}

	AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
	error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error) {
		shmat_ret = error;
		goto shmat_out;
	}

#if CONFIG_MACF
	error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
Example #6
0
/*
 * shmat(2): attach the shared memory segment 'shmid' into the calling
 * process's address space at 'shmaddr' (or a kernel-chosen address when
 * NULL), honoring SHM_RDONLY and SHM_RND.  On success the attach
 * address is returned through *retval.
 */
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	/* Lazily allocate the per-process attach table on first shmat,
	 * with all slots marked free (shmid == -1). */
	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	/* Read-only attaches need IPC_R; writable ones need R and W. */
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	/* Find a free slot in the attach table; EMFILE if full. */
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	/* A user-supplied address must be SHMLBA-aligned, either by
	 * rounding it down (SHM_RND) or by rejecting it (EINVAL). */
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND) 
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else
		attach_va = 0;
	/* Take a reference on the backing aobj for this mapping; it is
	 * dropped again if uvm_map fails. */
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	/* Record the attach and update segment accounting. */
	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}
/*
 * Apply and drain a client's semaphore-undo list when it exits.
 * 'undoid' is the shmid of the client's UNDO segment; each recorded
 * entry adjusts the corresponding semaphore value and wakes waiters.
 * Returns 0 on success, -1 on lookup/mapping failure.
 */
int
semexit(int undoid) {
	struct sem_undo *suptr;
	struct sem *semptr;
	struct shmid_ds *undoseg;

	if (undoid < 0) {
		return (-1);
	}

	undoseg = shm_find_segment_by_shmid(undoid);
	/* BUGFIX: the lookup can fail; the old code dereferenced the
	 * result unconditionally. */
	if (undoseg == NULL) {
		sysvd_print_err("undo segment %d not found\n", undoid);
		return (-1);
	}
	/* The UNDO segment must be mapped by only one segment. */
	if (undoseg->shm_nattch != 1) {
		sysvd_print_err("undo segment mapped by more"
				"than one process\n");
		exit(-1);
	}

	suptr = (struct sem_undo *)map_seg(undoid);
	if (suptr == NULL) {
		sysvd_print_err("no %d undo segment found\n", undoid);
		return (-1);
	}

	/* No locking mechanism is required because only the
	 * client and the daemon can access the UNDO segment.
	 * At this moment the client is disconnected so only
	 * the daemon can modify this segment.
	 */
	while (suptr->un_cnt) {
		struct semid_pool *semaptr;
		int semid;
		int semnum;
		int adjval;
		int ix;

		/* Always process the last recorded entry. */
		ix = suptr->un_cnt - 1;
		semid = suptr->un_ent[ix].un_id;
		semnum = suptr->un_ent[ix].un_num;
		adjval = suptr->un_ent[ix].un_adjval;

		semaptr = (struct semid_pool *)map_seg(semid);
		if (!semaptr) {
			return (-1);
		}

		/* Was it removed? */
		if (semaptr->gen == -1 ||
			semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid) ||
			(semaptr->ds.sem_perm.mode & SHMSEG_ALLOCATED) == 0) {
			--suptr->un_cnt;
			sysvd_print_err("semexit - semid not allocated\n");
			continue;
		}
		if (semnum >= semaptr->ds.sem_nsems) {
			--suptr->un_cnt;
			sysvd_print_err("semexit - semnum out of range\n");
			continue;
		}

#ifdef SYSV_RWLOCK
#ifdef SYSV_SEMS
		sysv_rwlock_rdlock(&semaptr->rwlock);
#else
		sysv_rwlock_wrlock(&semaptr->rwlock);
#endif //SYSV_SEMS
#else
		sysv_mutex_lock(&semaptr->mutex);
		/* Nobody can remove the semaphore between the check and the
		 * lock acquisition because it must first send a IPC_RMID
		 * to me and I will process that after finishing this function.
		 */
#endif //SYSV_RWLOCK
		semptr = &semaptr->ds.sem_base[semnum];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		/* Re-validate the entry under the lock before applying. */
		if (ix == suptr->un_cnt - 1 &&
		    semid == suptr->un_ent[ix].un_id &&
		    semnum == suptr->un_ent[ix].un_num &&
		    adjval == suptr->un_ent[ix].un_adjval) {
			--suptr->un_cnt;

			/* Clamp at zero on negative adjustments. */
			if (adjval < 0) {
				if (semptr->semval < -adjval)
					semptr->semval = 0;
				else
					semptr->semval += adjval;
			} else {
				semptr->semval += adjval;
			}
			/* TODO multithreaded daemon:
			 * Check again if the semaphore was removed and do
			 * not wake anyone if it was.*/
			umtx_wakeup((int *)&semptr->semval, 0);
		}
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif

#ifdef SYSV_RWLOCK
		sysv_rwlock_unlock(&semaptr->rwlock);
#else
		sysv_mutex_unlock(&semaptr->mutex);
#endif
		munmap_seg(semid, semaptr);
	}

	munmap_seg(undoid, suptr);
	return (0);
}
/* Handle a shmctl() request.
 * Supports IPC_STAT, IPC_SET and IPC_RMID; anything else (including
 * SHM_LOCK/SHM_UNLOCK) is rejected with EINVAL.  Returns 0 or an
 * errno value.
 */
int
handle_shmctl(struct shmctl_msg *shmctl_msg,
		struct cmsgcred *cred ) {
	int error = 0;
	struct shmid_ds *shmseg, *inbuf;

	/*	if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
		return (ENOSYS);
		*/
	shmseg = shm_find_segment_by_shmid(shmctl_msg->shmid);

	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (shmctl_msg->cmd) {
		case IPC_STAT:
			sysvd_print("IPC STAT\n");
			/* Reading the descriptor requires read permission. */
			error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
			if (error) {
				sysvd_print("IPC_STAT not allowed\n");
				break;
			}
			/* Reply carries a copy of the descriptor. */
			shmctl_msg->buf = *shmseg;
			break;
		case IPC_SET:
			sysvd_print("IPC SET\n");
			error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
			if (error) {
				sysvd_print("IPC_SET not allowed\n");
				break;
			}
			inbuf = &shmctl_msg->buf;

			/* Only owner/group and the access bits are
			 * user-settable. */
			shmseg->shm_perm.uid = inbuf->shm_perm.uid;
			shmseg->shm_perm.gid = inbuf->shm_perm.gid;
			shmseg->shm_perm.mode =
				(shmseg->shm_perm.mode & ~ACCESSPERMS) |
				(inbuf->shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time(NULL);
			break;
		case IPC_RMID:
			sysvd_print("IPC RMID shmid = %d\n",
					shmctl_msg->shmid);
			error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
			if (error) {
				sysvd_print("IPC_RMID not allowed\n");
				break;
			}
			/* Mark removed; free now only if nobody is
			 * attached, otherwise notify attached processes. */
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(IPCID_TO_IX(shmctl_msg->shmid));
				shm_last_free = IPCID_TO_IX(shmctl_msg->shmid);
			}
			else {
				/* In sem and msg cases, other process must be
				 * noticed about the removal. */
				struct shm_handle *internal =
					(struct shm_handle *)shmseg->shm_internal;
				mark_segment_removed(shmctl_msg->shmid,
						internal->type);
			}
			break;
#if 0
		case SHM_LOCK:
		case SHM_UNLOCK:
#endif
		default:
			error = EINVAL;
			break;
	}
done:
	return (error);

}
/* Handle a shmat() request.
 * Validates the segment and the caller's permissions, optionally grows
 * an UNDO segment, updates attach accounting, and records the
 * (pid, shmid) attachment in both directions.  Returns 0 or an errno
 * value.
 */
int
handle_shmat(pid_t pid, struct shmat_msg *shmat_msg,
		struct cmsgcred *cred ) {
	int error;
	int fd;
	struct shmid_ds *shmseg;
	struct pid_attached *pidatt;
	struct shm_handle *handle;
	size_t new_size = shmat_msg->size;
	struct client *cl;
	struct id_attached *idatt;

	/*if (!jail_sysvipc_allowed && td->td_cmsgcred->cr_prison != NULL)
	  return (ENOSYS);

again:*/
	shmseg = shm_find_segment_by_shmid(shmat_msg->shmid);
	if (shmseg == NULL) {
		sysvd_print_err("shmat error: segment was not found\n");
		error = EINVAL;
		goto done;
	}
	/* Read-only attach needs IPC_R; read-write needs R and W. */
	error = ipcperm(cred, &shmseg->shm_perm,
			(shmat_msg->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;

	/* BUGFIX: resolve the client entry and allocate both tracking
	 * nodes up front, so every failure happens before any segment
	 * state is mutated.  The old code left all of these unchecked. */
	cl = _hash_lookup(clientshash, pid);
	if (cl == NULL) {
		sysvd_print_err("shmat error: no client for pid %d\n", pid);
		error = EINVAL;
		goto done;
	}
	pidatt = malloc(sizeof(*pidatt));
	idatt = malloc(sizeof(*idatt));
	if (pidatt == NULL || idatt == NULL) {
		free(pidatt);
		free(idatt);
		error = ENOMEM;
		goto done;
	}

	handle = shmseg->shm_internal;

	if (shmat_msg->size > shmseg->shm_segsz) {
		/* Only the UNDO segment is allowed to grow. */
		if (handle->type != UNDOGET) {
			free(pidatt);
			free(idatt);
			error = EINVAL;
			goto done;
		}

		fd = handle->fd;
		/* BUGFIX: the ftruncate() result was ignored. */
		if (ftruncate(fd, round_page(new_size)) < 0) {
			free(pidatt);
			free(idatt);
			error = errno;
			goto done;
		}
		shmseg->shm_segsz = new_size;
	}

	shmseg->shm_lpid = pid;
	shmseg->shm_atime = time(NULL);

	if (handle->type != UNDOGET)
		shmseg->shm_nattch++;
	else
		shmseg->shm_nattch = 1; /* Only a process calls shmat and
		only once. If it does it for more than once that is because
		it called exec() and reinitialized the undo segment. */

	/* Insert the pid in the segment list of attached pids.
	 * The list is checked in handle_shmdt so that only
	 * attached pids can detach from this segment.
	 */
	sysvd_print("nattch = %d pid = %d\n",
			shmseg->shm_nattch, pid);

	pidatt->pid = pid;
	LIST_INSERT_HEAD(&handle->attached_list, pidatt, link);

	/* Add the segment at the list of attached segments of the client.
	 * It is used when the process finishes its execution. The daemon
	 * walks through the list to detach the segments.
	 */
	idatt->shmid = shmat_msg->shmid;
	LIST_INSERT_HEAD(&cl->ids_attached, idatt, link);

	return (0);
done:
	return (error);
}