Example #1
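// Syscall backend for create_sem(): safely copies the semaphore name from
// userland (if one was given) and creates the semaphore on behalf of the
// calling team.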
sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}
Example #2
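// Opens the packages directory -- either the given path or, if no path is
// given, the "packages" entry at the mount point -- and passes the resulting
// vnode on to _Init().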
status_t
PackagesDirectory::Init(const char* path, dev_t mountPointDeviceID,
	ino_t mountPointNodeID, struct stat& _st)
{
	// Open the directory. We want the path to be interpreted depending on
	// where it came from (kernel or userland), but we always want a FD in the
	// kernel I/O context. There's no VFS service method to do that for us,
	// so we need to do that ourselves.
	bool calledFromKernel
		= team_get_current_team_id() == team_get_kernel_team_id();
		// Not entirely correct, but good enough for now. The only
		// alternative is to have that information passed in as well.

	struct vnode* vnode;
	status_t error;
	if (path != NULL) {
		error = vfs_get_vnode_from_path(path, calledFromKernel, &vnode);
	} else {
		// No path given -- use the "packages" directory at our mount point.
		error = vfs_entry_ref_to_vnode(mountPointDeviceID, mountPointNodeID,
			"packages", &vnode);
	}
	if (error != B_OK) {
		ERROR("Failed to open packages directory \"%s\"\n", strerror(error));
		RETURN_ERROR(error);
	}

	return _Init(vnode, _st);
}
Example #3
status_t
_user_system_profiler_recorded(struct system_profiler_parameters* userParameters)
{
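	// Hands the recorded profiling buffer over to the calling userland team:
	// the area is transferred and made read-only, and the recording
	// parameters are copied to the given userland address.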
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
}
Example #4
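// Hook invoked when a node has been opened: ignores nodes on devices that
// precede the boot device and records the node in the current team's
// session, creating the session first if needed.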
static void
node_opened(struct vnode *vnode, int32 fdType, dev_t device, ino_t parent,
	ino_t node, const char *name, off_t size)
{
	if (device < gBootDevice) {
		// we ignore any access to rootfs, pipefs, and devfs
		// ToDo: if we can ever move the boot device on the fly, this will break
		return;
	}

	Session *session;
	SessionGetter getter(team_get_current_team_id(), &session);

	if (session == NULL) {
		char buffer[B_FILE_NAME_LENGTH];
		if (name == NULL
			&& vfs_get_vnode_name(vnode, buffer, sizeof(buffer)) == B_OK)
			name = buffer;

		// create new session for this team
		getter.New(name, device, node, &session);
	}

	if (session == NULL || !session->IsActive()) {
		if (sMainSession != NULL) {
			// ToDo: this opens a race condition with the "stop session" syscall
			getter.Stop();
		}
		return;
	}

	session->AddNode(device, node);
}
Example #5
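// Writes a core file for the current team to the given path, delegating the
// actual work to a newly created CoreDumper.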
status_t
core_dump_write_core_file(const char* path, bool killTeam)
{
	TRACE("core_dump_write_core_file(\"%s\", %d): team: %" B_PRId32 "\n", path,
		killTeam, team_get_current_team_id());

	CoreDumper* coreDumper = new(std::nothrow) CoreDumper();
	if (coreDumper == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<CoreDumper> coreDumperDeleter(coreDumper);
	return coreDumper->Dump(path, killTeam);
}
Example #6
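// Hook invoked when a node has been closed: if no I/O happened on the node
// while it was open (FILE_CACHE_NO_IO), it is removed from the current
// team's session again.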
static void
node_closed(struct vnode *vnode, int32 fdType, dev_t device, ino_t node,
	int32 accessType)
{
	Session *session;
	SessionGetter getter(team_get_current_team_id(), &session);

	if (session == NULL)
		return;

	if (accessType == FILE_CACHE_NO_IO)
		session->RemoveNode(device, node);
}
Example #7
status_t
DefaultUserNotificationService::_AddListener(uint32 eventMask,
	NotificationListener& notificationListener)
{
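	// Create a listener entry for the calling team and add it to the list;
	// FirstAdded() is invoked lazily for the very first listener.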
	default_listener* listener = new(std::nothrow) default_listener;
	if (listener == NULL)
		return B_NO_MEMORY;

	listener->eventMask = eventMask;
	listener->team = team_get_current_team_id();
	listener->listener = &notificationListener;

	RecursiveLocker _(fLock);
	if (fListeners.IsEmpty())
		FirstAdded();
	fListeners.Add(listener);

	return B_OK;
}
Example #8
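// Finds the next port owned by the given team, starting at the slot stored
// in *_cookie, and fills in its port_info; B_CURRENT_TEAM is resolved to the
// calling team.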
status_t
_get_next_port_info(team_id team, int32* _cookie, struct port_info* info,
	size_t size)
{
	TRACE(("get_next_port_info(team = %ld)\n", team));

	if (info == NULL || size != sizeof(port_info) || _cookie == NULL
		|| team < B_OK)
		return B_BAD_VALUE;
	if (!sPortsActive)
		return B_BAD_PORT_ID;

	int32 slot = *_cookie;
	if (slot >= sMaxPorts)
		return B_BAD_PORT_ID;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();

	info->port = -1; // used as found flag

	while (slot < sMaxPorts) {
		MutexLocker locker(sPorts[slot].lock);

		if (sPorts[slot].id != -1 && !is_port_closed(slot)
			&& sPorts[slot].owner == team) {
			// found one!
			fill_port_info(&sPorts[slot], info, size);
			slot++;
			break;
		}

		slot++;
	}

	if (info->port == -1)
		return B_BAD_PORT_ID;

	*_cookie = slot;
	return B_OK;
}
Example #9
status_t
socket_open(int family, int type, int protocol, net_socket** _socket)
{
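	// Create the socket, let its protocol module open it, and register it in
	// the global socket list with the calling team as its owner.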
	net_socket_private* socket;
	status_t status = create_socket(family, type, protocol, &socket);
	if (status != B_OK)
		return status;

	status = socket->first_info->open(socket->first_protocol);
	if (status != B_OK) {
		delete socket;
		return status;
	}

	socket->owner = team_get_current_team_id();
	socket->is_in_socket_list = true;

	mutex_lock(&sSocketLock);
	sSocketList.Add(socket);
	mutex_unlock(&sSocketLock);

	*_socket = socket;
	return B_OK;
}
Example #10
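// Creates a new port for the current team: grabs a free slot in the port
// table, derives the port ID from that slot, initializes the slot, and links
// the port into the team's port list.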
port_id
create_port(int32 queueLength, const char* name)
{
	TRACE(("create_port(queueLength = %ld, name = \"%s\")\n", queueLength,
		name));

	if (!sPortsActive) {
		panic("ports used too early!\n");
		return B_BAD_PORT_ID;
	}
	if (queueLength < 1 || queueLength > MAX_QUEUE_LENGTH)
		return B_BAD_VALUE;

	struct team* team = thread_get_current_thread()->team;
	if (team == NULL)
		return B_BAD_TEAM_ID;

	MutexLocker locker(sPortsLock);

	// check early on if there are any free port slots to use
	if (sUsedPorts >= sMaxPorts)
		return B_NO_MORE_PORTS;

	// check & dup name
	char* nameBuffer = strdup(name != NULL ? name : "unnamed port");
	if (nameBuffer == NULL)
		return B_NO_MEMORY;

	sUsedPorts++;

	// find the first empty spot
	for (int32 slot = 0; slot < sMaxPorts; slot++) {
		int32 i = (slot + sFirstFreeSlot) % sMaxPorts;

		if (sPorts[i].id == -1) {
			// make the port_id be a multiple of the slot it's in
			if (i >= sNextPort % sMaxPorts)
				sNextPort += i - sNextPort % sMaxPorts;
			else
				sNextPort += sMaxPorts - (sNextPort % sMaxPorts - i);
			sFirstFreeSlot = slot + 1;

			MutexLocker portLocker(sPorts[i].lock);
			sPorts[i].id = sNextPort++;
			locker.Unlock();

			sPorts[i].capacity = queueLength;
			sPorts[i].owner = team_get_current_team_id();
			sPorts[i].lock.name = nameBuffer;
			sPorts[i].read_count = 0;
			sPorts[i].write_count = queueLength;
			sPorts[i].total_count = 0;
			sPorts[i].select_infos = NULL;

			{
				InterruptsSpinLocker teamLocker(gTeamSpinlock);
				list_add_item(&team->port_list, &sPorts[i].team_link);
			}

			port_id id = sPorts[i].id;

			T(Create(sPorts[i]));
			portLocker.Unlock();

			TRACE(("create_port() done: port created %ld\n", id));

			sNotificationService.Notify(PORT_ADDED, id);
			return id;
		}
	}

	// Still not enough ports... - due to sUsedPorts, this cannot really
	// happen anymore.
	panic("out of ports, but sUsedPorts is broken");
	return B_NO_MORE_PORTS;
}
Example #11
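// Writes a message assembled from the given vectors to the port, waiting for
// a free message slot if necessary; the calling team and its credentials are
// recorded as the sender.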
status_t
writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if (bufferSize > PORT_MAX_MESSAGE_SIZE)
		return B_BAD_VALUE;

	// Remember whether the message has to be copied from user memory before
	// masking the flags below, as the mask strips PORT_FLAG_USE_USER_MEMCPY.
	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;

	// mask irrelevant flags (for acquire_sem() usage)
	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	if ((flags & B_RELATIVE_TIMEOUT) != 0
		&& timeout != B_INFINITE_TIMEOUT && timeout > 0) {
		// Make the timeout absolute, since we have more than one step where
		// we might have to wait
		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
		timeout += system_time();
	}

	int32 slot = id % sMaxPorts;
	status_t status;
	port_message* message = NULL;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("write_port_etc: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}
	if (is_port_closed(slot)) {
		TRACE(("write_port_etc: port %ld closed\n", id));
		return B_BAD_PORT_ID;
	}

	if (sPorts[slot].write_count <= 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		sPorts[slot].write_count--;

		// We need to block in order to wait for a free message slot
		ConditionVariableEntry entry;
		sPorts[slot].write_condition.Add(&entry);

		locker.Unlock();

		status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id || is_port_closed(slot)) {
			// the port is no longer there
			T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK)
			goto error;
	} else
		sPorts[slot].write_count--;

	status = get_port_message(msgCode, bufferSize, flags, timeout,
		&message);
	if (status != B_OK)
		goto error;

	// sender credentials
	message->sender = geteuid();
	message->sender_group = getegid();
	message->sender_team = team_get_current_team_id();

	if (bufferSize > 0) {
		uint32 i;
		char* target = message->buffer;
			// append position within the message buffer
		if (userCopy) {
			// copy from user memory, vector by vector
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				status = user_memcpy(target, msgVecs[i].iov_base, bytes);
				if (status != B_OK) {
					put_port_message(message);
					goto error;
				}

				target += bytes;
				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		} else {
			// copy from kernel memory, vector by vector
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				memcpy(target, msgVecs[i].iov_base, bytes);

				target += bytes;
				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		}
	}

	sPorts[slot].messages.Add(message);
	sPorts[slot].read_count++;

	T(Write(sPorts[slot], message->code, message->size, B_OK));

	notify_port_select_events(slot, B_EVENT_READ);
	sPorts[slot].read_condition.NotifyOne();
	return B_OK;

error:
	// Give up our slot in the queue again, and let someone else
	// try and fail
	T(Write(sPorts[slot], 0, 0, status));
	sPorts[slot].write_count++;
	notify_port_select_events(slot, B_EVENT_WRITE);
	sPorts[slot].write_condition.NotifyOne();

	return status;
}