/* Reply handler for the two-step connection setup handshake.
 *
 * Invoked first for the AUTH reply (state PA_CONTEXT_AUTHORIZING), where it
 * negotiates the protocol version and the SHM/memfd transport, then sends
 * SET_CLIENT_NAME and re-registers itself; invoked a second time for that
 * reply (state PA_CONTEXT_SETTING_NAME), where it reads the client index and
 * moves the context to PA_CONTEXT_READY.
 *
 * pd       - dispatcher the reply arrived on (also carries peer credentials)
 * command  - PA_COMMAND_REPLY on success, otherwise an error command
 * tag      - tag of the reply; reused for the follow-up request
 * t        - reply payload
 * userdata - the pa_context that initiated the handshake
 */
static void setup_complete_callback(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata) {
    pa_context *c = userdata;

    pa_assert(pd);
    pa_assert(c);
    pa_assert(c->state == PA_CONTEXT_AUTHORIZING || c->state == PA_CONTEXT_SETTING_NAME);

    /* Hold a reference across the callback so error/state changes cannot
     * destroy the context under us. */
    pa_context_ref(c);

    if (command != PA_COMMAND_REPLY) {
        pa_context_handle_error(c, command, t, true);
        goto finish;
    }

    switch(c->state) {
        case PA_CONTEXT_AUTHORIZING: {
            pa_tagstruct *reply;
            bool shm_on_remote = false;
            bool memfd_on_remote = false;

            if (pa_tagstruct_getu32(t, &c->version) < 0 ||
                !pa_tagstruct_eof(t)) {
                pa_context_fail(c, PA_ERR_PROTOCOL);
                goto finish;
            }

            /* Minimum supported version */
            if (c->version < 8) {
                pa_context_fail(c, PA_ERR_VERSION);
                goto finish;
            }

            /* Starting with protocol version 13 the MSB of the version tag
               reflects if shm is available for this connection or not. */
            if (c->version >= 13) {
                shm_on_remote = !!(c->version & 0x80000000U);

                /* Starting with protocol version 31, the second MSB of the version
                 * tag reflects whether memfd is supported on the other PA end. */
                if (c->version >= 31)
                    memfd_on_remote = !!(c->version & 0x40000000U);

                /* Reserve the two most-significant _bytes_ of the version tag
                 * for flags. */
                c->version &= 0x0000FFFFU;
            }

            pa_log_debug("Protocol version: remote %u, local %u", c->version, PA_PROTOCOL_VERSION);

            /* Enable shared memory support if possible: SHM needs at least
             * protocol 10, and from 13 on the remote must have advertised it. */
            if (c->do_shm) {
                if (c->version < 10 || (c->version >= 13 && !shm_on_remote))
                    c->do_shm = false;
            }

            if (c->do_shm) {
                /* Only enable SHM if both sides are owned by the same
                 * user. This is a security measure because otherwise
                 * data private to the user might leak. */
#ifdef HAVE_CREDS
                const pa_creds *creds;
                if (!(creds = pa_pdispatch_creds(pd)) || getuid() != creds->uid)
                    c->do_shm = false;
#endif
            }

            pa_log_debug("Negotiated SHM: %s", pa_yes_no(c->do_shm));
            pa_pstream_enable_shm(c->pstream, c->do_shm);

            /* Pick the memory transport: memfd if both ends support it
             * (protocol >= 31), POSIX SHM otherwise, private when SHM is off. */
            c->shm_type = PA_MEM_TYPE_PRIVATE;
            if (c->do_shm) {
                if (c->version >= 31 && memfd_on_remote && c->memfd_on_local) {
                    const char *reason;

                    pa_pstream_enable_memfd(c->pstream);
                    if (pa_mempool_is_memfd_backed(c->mempool)) {
                        if (pa_pstream_register_memfd_mempool(c->pstream, c->mempool, &reason))
                            pa_log("Failed to register memfd mempool. Reason: %s", reason);
                    }

                    /* Even if memfd pool registration fails, the negotiated SHM type
                     * shall remain memfd as both endpoints claim to support it. */
                    c->shm_type = PA_MEM_TYPE_SHARED_MEMFD;
                } else
                    c->shm_type = PA_MEM_TYPE_SHARED_POSIX;
            }

            pa_log_debug("Memfd possible: %s", pa_yes_no(c->memfd_on_local));
            pa_log_debug("Negotiated SHM type: %s", pa_mem_type_to_string(c->shm_type));

            /* Second handshake step: announce the client name/proplist.
             * Protocol >= 13 carries a full property list, older servers
             * only get the application name string. */
            reply = pa_tagstruct_command(c, PA_COMMAND_SET_CLIENT_NAME, &tag);

            if (c->version >= 13) {
                pa_init_proplist(c->proplist);
                pa_tagstruct_put_proplist(reply, c->proplist);
            } else
                pa_tagstruct_puts(reply, pa_proplist_gets(c->proplist, PA_PROP_APPLICATION_NAME));

            pa_pstream_send_tagstruct(c->pstream, reply);
            /* Re-register ourselves so the SET_CLIENT_NAME reply lands in the
             * PA_CONTEXT_SETTING_NAME branch below. */
            pa_pdispatch_register_reply(c->pdispatch, tag, DEFAULT_TIMEOUT, setup_complete_callback, c, NULL);

            pa_context_set_state(c, PA_CONTEXT_SETTING_NAME);
            break;
        }

        case PA_CONTEXT_SETTING_NAME:

            /* Protocol >= 13 returns our client index; older servers send an
             * empty reply. Either way the tagstruct must be fully consumed. */
            if ((c->version >= 13 && (pa_tagstruct_getu32(t, &c->client_index) < 0 ||
                                      c->client_index == PA_INVALID_INDEX)) ||
                !pa_tagstruct_eof(t)) {
                pa_context_fail(c, PA_ERR_PROTOCOL);
                goto finish;
            }

            pa_context_set_state(c, PA_CONTEXT_READY);
            break;

        default:
            pa_assert_not_reached();
    }

finish:
    pa_context_unref(c);
}
/* Create a fresh read/write shared-memory segment of the given type.
 *
 * m    - output descriptor; on success its id, type, size, ptr, fd and
 *        do_unlink fields are filled in
 * type - PA_MEM_TYPE_SHARED_POSIX (needs HAVE_SHM_OPEN) or
 *        PA_MEM_TYPE_SHARED_MEMFD (needs HAVE_MEMFD); anything else fails
 * size - requested usable size; the actual segment is enlarged by
 *        shm_marker_size(type) to hold a trailing marker
 * mode - permission bits for shm_open() (unused for memfd)
 *
 * Returns 0 on success, -1 on failure (segment unlinked/closed again on the
 * error path). When neither HAVE_SHM_OPEN nor HAVE_MEMFD is defined the
 * whole body compiles away and the function always returns -1. */
static int sharedmem_create(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
    char fn[32];
    int fd = -1;
    struct shm_marker *marker;
    bool do_unlink = false;

    /* Each time we create a new SHM area, let's first drop all stale
     * ones */
    pa_shm_cleanup();

    /* Random id: used both for the /dev/shm segment name and for
     * identifying the area to the peer. */
    pa_random(&m->id, sizeof(m->id));

    switch (type) {
#ifdef HAVE_SHM_OPEN
    case PA_MEM_TYPE_SHARED_POSIX:
        segment_name(fn, sizeof(fn), m->id);
        /* O_EXCL: fail rather than reuse a leftover segment with our name */
        fd = shm_open(fn, O_RDWR|O_CREAT|O_EXCL, mode);
        do_unlink = true;
        break;
#endif
#ifdef HAVE_MEMFD
    case PA_MEM_TYPE_SHARED_MEMFD:
        /* Sealing is allowed so the receiving side can verify immutability */
        fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING);
        break;
#endif
    default:
        goto fail;
    }

    if (fd < 0) {
        pa_log("%s open() failed: %s", pa_mem_type_to_string(type), pa_cstrerror(errno));
        goto fail;
    }

    m->type = type;
    /* Reserve extra room at the end for the liveness marker */
    m->size = size + shm_marker_size(type);
    m->do_unlink = do_unlink;

    if (ftruncate(fd, (off_t) m->size) < 0) {
        pa_log("ftruncate() failed: %s", pa_cstrerror(errno));
        goto fail;
    }

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

    if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(m->size), PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NORESERVE, fd, (off_t) 0)) == MAP_FAILED) {
        pa_log("mmap() failed: %s", pa_cstrerror(errno));
        goto fail;
    }

    if (type == PA_MEM_TYPE_SHARED_POSIX) {
        /* We store our PID at the end of the shm block, so that we
         * can check for dead shm segments later */
        marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - shm_marker_size(type));
        pa_atomic_store(&marker->pid, (int) getpid());
        pa_atomic_store(&marker->marker, SHM_MARKER);
    }

    /* For memfds, we keep the fd open until we pass it
     * to the other PA endpoint over unix domain socket. */
    if (type == PA_MEM_TYPE_SHARED_MEMFD)
        m->fd = fd;
    else {
        /* POSIX SHM segments are reopened by name; the creating fd can go */
        pa_assert_se(pa_close(fd) == 0);
        m->fd = -1;
    }

    return 0;

fail:
    if (fd >= 0) {
#ifdef HAVE_SHM_OPEN
        /* Remove the name we registered so the segment does not leak */
        if (type == PA_MEM_TYPE_SHARED_POSIX)
            shm_unlink(fn);
#endif
        pa_close(fd);
    }
#endif /* defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD) */

    return -1;
}