/* Free the transaction if nothing references it anymore. */
void dns_transaction_gc(DnsTransaction *t) {
        assert(t);

        /* GC is explicitly blocked, leave the transaction around. */
        if (t->block_gc > 0)
                return;

        /* Still referenced by a query or a zone item? Then keep it. */
        if (!set_isempty(t->queries) || !set_isempty(t->zone_items))
                return;

        dns_transaction_free(t);
}
/* Detach the controlling client from a running scope and take over
 * supervision of the remaining processes ourselves. Returns 0 on success,
 * -ESTALE if the scope is in a state where abandoning makes no sense. */
int scope_abandon(Scope *s) {
        Unit *u;

        assert(s);
        u = UNIT(s);

        if (!IN_SET(s->state, SCOPE_RUNNING, SCOPE_ABANDONED))
                return -ESTALE;

        free(s->controller);
        s->controller = NULL;

        /* The client is no longer watching the remaining processes, so step in
         * here, under the assumption that the remaining processes will sooner
         * or later be reassigned to us as parent. */
        unit_tidy_watch_pids(u, 0, 0);
        unit_watch_all_pids(u);

        /* If the PID set is already empty, finish this off right away;
         * otherwise record the abandoned state. */
        if (set_isempty(u->pids))
                scope_notify_cgroup_empty_event(u);
        else
                scope_set_state(s, SCOPE_ABANDONED);

        return 0;
}
/* Verify a loaded scope unit: refuse scopes that carry no PIDs, unless the
 * manager is currently reloading. Returns 0 if acceptable, -EINVAL otherwise. */
static int scope_verify(Scope *s) {
        assert(s);

        /* Only fully loaded units get verified. */
        if (UNIT(s)->load_state != UNIT_LOADED)
                return 0;

        if (!set_isempty(UNIT(s)->pids))
                return 0;
        if (UNIT(s)->manager->n_reloading > 0)
                return 0;

        log_unit_error(UNIT(s), "Scope has no PIDs. Refusing.");
        return -EINVAL;
}
/*
 * db_set_isempty() - Check whether a set (or sequence) contains no elements
 *    at all. For a sequence holding only NULL elements this returns false,
 *    since NULL elements are still considered valid elements for sequences.
 * return : non-zero if the set has no elements
 * set(in): set descriptor
 */
int
db_set_isempty (DB_SET * set)
{
  CHECK_CONNECT_FALSE ();
  CHECK_1ARG_FALSE (set);

  /* allow all types */
  return set_isempty (set) ? 1 : 0;
}
/* Verify a loaded scope unit: a scope without PIDs is refused (-ENOENT),
 * except while the manager is reloading or for the special init scope. */
static int scope_verify(Scope *s) {
        Unit *u;

        assert(s);
        u = UNIT(s);

        if (u->load_state != UNIT_LOADED)
                return 0;

        if (set_isempty(u->pids) &&
            !MANAGER_IS_RELOADING(u->manager) &&
            !unit_has_name(u, SPECIAL_INIT_SCOPE)) {
                log_unit_error(u, "Scope has no PIDs. Refusing.");
                return -ENOENT;
        }

        return 0;
}
static void scope_sigchld_event(Unit *u, pid_t pid, int code, int status) {

        /* If we get a SIGCHLD event for one of the processes we were
         * interested in, then we look for others to watch, under the
         * assumption that we'll sooner or later get a SIGCHLD for them, as the
         * original process we watched was probably the parent of them, and
         * they are hence now our children. */
        unit_tidy_watch_pids(u, 0, 0);
        unit_watch_all_pids(u);

        if (!set_isempty(u->pids))
                return;

        /* The PID set is empty now, hence let's finish this off. */
        scope_notify_cgroup_empty_event(u);
}
/* Verify a loaded scope unit: an empty scope is only acceptable while the
 * manager is reloading or re-executing, or for the special init scope. */
static int scope_verify(Scope *s) {
        assert(s);

        if (UNIT(s)->load_state != UNIT_LOADED)
                return 0;

        if (!set_isempty(UNIT(s)->pids))
                return 0;
        if (manager_is_reloading_or_reexecuting(UNIT(s)->manager))
                return 0;
        if (unit_has_name(UNIT(s), SPECIAL_INIT_SCOPE))
                return 0;

        log_unit_error(UNIT(s), "Scope has no PIDs. Refusing.");
        return -EINVAL;
}
/* Flush all queued autofs wait tokens for this automount, reporting 'status'
 * (0 for success, a negative errno-style code for failure) to the kernel for
 * each one. Returns 0 on success or if there were no tokens, otherwise the
 * last error encountered (remaining tokens are still processed). */
int automount_send_ready(Automount *a, int status) {
        int ioctl_fd, r;
        unsigned token;

        assert(a);
        assert(status <= 0);

        if (set_isempty(a->tokens))
                return 0;

        ioctl_fd = open_ioctl_fd(UNIT(a)->manager->dev_autofs_fd, a->where, a->dev_id);
        if (ioctl_fd < 0) {
                r = ioctl_fd;
                goto fail;
        }

        if (status)
                log_debug_unit(UNIT(a)->id, "Sending failure: %s", strerror(-status));
        else
                log_debug_unit(UNIT(a)->id, "Sending success.");

        r = 0;

        /* Autofs thankfully does not hand out 0 as a token */
        while ((token = PTR_TO_UINT(set_steal_first(a->tokens)))) {
                int k;

                /* Autofs fun fact II:
                 *
                 * if you pass a positive status code here, the kernel will
                 * freeze! Yay! */
                k = autofs_send_ready(UNIT(a)->manager->dev_autofs_fd, ioctl_fd, token, status);
                if (k < 0)
                        r = k;
        }

        /* NOTE: execution falls through into the label on success, too; the
         * guard below makes closing safe for both paths. */
fail:
        if (ioctl_fd >= 0)
                close_nointr_nofail(ioctl_fd);

        return r;
}
/* Drain the given token set, reporting 'status' (0 on success, negative
 * errno on failure) to the kernel for each queued autofs token. Returns 0 on
 * success or if there was nothing to send, otherwise the last error seen. */
static int automount_send_ready(Automount *a, Set *tokens, int status) {
        _cleanup_close_ int ioctl_fd = -1;
        unsigned token;
        int r;

        assert(a);
        assert(status <= 0);

        /* No tokens queued? Then there is nothing to tell the kernel. */
        if (set_isempty(tokens))
                return 0;

        ioctl_fd = open_ioctl_fd(UNIT(a)->manager->dev_autofs_fd, a->where, a->dev_id);
        if (ioctl_fd < 0)
                return ioctl_fd;

        if (status != 0)
                log_unit_debug_errno(UNIT(a), status, "Sending failure: %m");
        else
                log_unit_debug(UNIT(a), "Sending success.");

        r = 0;

        /* Autofs thankfully does not hand out 0 as a token */
        while ((token = PTR_TO_UINT(set_steal_first(tokens))) != 0) {
                int q;

                /* Autofs fun fact II:
                 *
                 * if you pass a positive status code here, the kernel will
                 * freeze! Yay! */
                q = autofs_send_ready(UNIT(a)->manager->dev_autofs_fd, ioctl_fd, token, status);
                if (q < 0)
                        r = q;
        }

        return r;
}
/* Returns true if the exit status set tracks no statuses and no signals.
 * A NULL set counts as empty. */
bool exit_status_set_is_empty(ExitStatusSet *x) {
        if (!x)
                return true;

        if (!set_isempty(x->status))
                return false;

        return set_isempty(x->signal);
}
/* Use this function only if you do not have direct access to /proc/self/mountinfo but the caller can open it
 * for you. This is the case when /proc is masked or not mounted. Otherwise, use bind_remount_recursive. */
int bind_remount_recursive_with_mountinfo(
                const char *prefix,
                unsigned long new_flags,
                unsigned long flags_mask,
                char **blacklist,
                FILE *proc_self_mountinfo) {

        _cleanup_set_free_free_ Set *done = NULL;
        _cleanup_free_ char *cleaned = NULL;
        int r;

        assert(proc_self_mountinfo);

        /* Recursively remount a directory (and all its submounts) read-only or read-write. If the directory is already
         * mounted, we reuse the mount and simply mark it MS_BIND|MS_RDONLY (or remove the MS_RDONLY for read-write
         * operation). If it isn't we first make it one. Afterwards we apply MS_BIND|MS_RDONLY (or remove MS_RDONLY) to
         * all submounts we can access, too. When mounts are stacked on the same mount point we only care for each
         * individual "top-level" mount on each point, as we cannot influence/access the underlying mounts anyway. We
         * do not have any effect on future submounts that might get propagated, they migt be writable. This includes
         * future submounts that have been triggered via autofs.
         *
         * If the "blacklist" parameter is specified it may contain a list of subtrees to exclude from the
         * remount operation. Note that we'll ignore the blacklist for the top-level path. */

        cleaned = strdup(prefix);
        if (!cleaned)
                return -ENOMEM;

        path_simplify(cleaned, false);

        done = set_new(&path_hash_ops);
        if (!done)
                return -ENOMEM;

        /* Re-scan mountinfo until a pass discovers no new mounts below the
         * prefix: each remount may trigger further submounts to appear. */
        for (;;) {
                _cleanup_set_free_free_ Set *todo = NULL;
                bool top_autofs = false;
                char *x;
                unsigned long orig_flags;

                todo = set_new(&path_hash_ops);
                if (!todo)
                        return -ENOMEM;

                rewind(proc_self_mountinfo);

                /* Parse one mountinfo entry per iteration; collect every
                 * not-yet-processed mount below the prefix into 'todo'. */
                for (;;) {
                        _cleanup_free_ char *path = NULL, *p = NULL, *type = NULL;
                        int k;

                        k = fscanf(proc_self_mountinfo,
                                   "%*s "       /* (1) mount id */
                                   "%*s "       /* (2) parent id */
                                   "%*s "       /* (3) major:minor */
                                   "%*s "       /* (4) root */
                                   "%ms "       /* (5) mount point */
                                   "%*s"        /* (6) mount options (superblock) */
                                   "%*[^-]"     /* (7) optional fields */
                                   "- "         /* (8) separator */
                                   "%ms "       /* (9) file system type */
                                   "%*s"        /* (10) mount source */
                                   "%*s"        /* (11) mount options (bind mount) */
                                   "%*[^\n]",   /* some rubbish at the end */
                                   &path, &type);
                        if (k != 2) {
                                if (k == EOF)
                                        break;

                                /* Malformed line: skip it and keep scanning. */
                                continue;
                        }

                        /* Mount points are octal-escaped in mountinfo; undo that. */
                        r = cunescape(path, UNESCAPE_RELAX, &p);
                        if (r < 0)
                                return r;

                        if (!path_startswith(p, cleaned))
                                continue;

                        /* Ignore this mount if it is blacklisted, but only if it isn't the top-level mount we shall
                         * operate on. */
                        if (!path_equal(cleaned, p)) {
                                bool blacklisted = false;
                                char **i;

                                STRV_FOREACH(i, blacklist) {
                                        if (path_equal(*i, cleaned))
                                                continue;
                                        if (!path_startswith(*i, cleaned))
                                                continue;

                                        if (path_startswith(p, *i)) {
                                                blacklisted = true;
                                                log_debug("Not remounting %s blacklisted by %s, called for %s", p, *i, cleaned);
                                                break;
                                        }
                                }
                                if (blacklisted)
                                        continue;
                        }

                        /* Let's ignore autofs mounts. If they aren't
                         * triggered yet, we want to avoid triggering
                         * them, as we don't make any guarantees for
                         * future submounts anyway. If they are
                         * already triggered, then we will find
                         * another entry for this. */
                        if (streq(type, "autofs")) {
                                top_autofs = top_autofs || path_equal(cleaned, p);
                                continue;
                        }

                        if (!set_contains(done, p)) {
                                /* set_consume takes ownership of p (even on
                                 * -EEXIST), hence clear our reference. */
                                r = set_consume(todo, p);
                                p = NULL;
                                if (r == -EEXIST)
                                        continue;
                                if (r < 0)
                                        return r;
                        }
                }

                /* If we have no submounts to process anymore and if
                 * the root is either already done, or an autofs, we
                 * are done */
                if (set_isempty(todo) &&
                    (top_autofs || set_contains(done, cleaned)))
                        return 0;

                if (!set_contains(done, cleaned) &&
                    !set_contains(todo, cleaned)) {
                        /* The prefix directory itself is not yet a mount, make it one. */
                        if (mount(cleaned, cleaned, NULL, MS_BIND|MS_REC, NULL) < 0)
                                return -errno;

                        orig_flags = 0;
                        (void) get_mount_flags(cleaned, &orig_flags);
                        orig_flags &= ~MS_RDONLY;

                        if (mount(NULL, cleaned, NULL, (orig_flags & ~flags_mask)|MS_BIND|MS_REMOUNT|new_flags, NULL) < 0)
                                return -errno;

                        log_debug("Made top-level directory %s a mount point.", prefix);

                        r = set_put_strdup(done, cleaned);
                        if (r < 0)
                                return r;
                }

                /* Apply the requested flags to every mount collected in this
                 * pass, moving each from 'todo' to 'done'. */
                while ((x = set_steal_first(todo))) {

                        r = set_consume(done, x);
                        if (IN_SET(r, 0, -EEXIST))
                                continue;
                        if (r < 0)
                                return r;

                        /* Deal with mount points that are obstructed by a later mount */
                        r = path_is_mount_point(x, NULL, 0);
                        if (IN_SET(r, 0, -ENOENT))
                                continue;
                        if (IN_SET(r, -EACCES, -EPERM)) {
                                /* Even if root user invoke this, submounts under private FUSE or NFS mount points
                                 * may not be acceessed. E.g.,
                                 *
                                 * $ bindfs --no-allow-other ~/mnt/mnt ~/mnt/mnt
                                 * $ bindfs --no-allow-other ~/mnt ~/mnt
                                 *
                                 * Then, root user cannot access the mount point ~/mnt/mnt.
                                 * In such cases, the submounts are ignored, as we have no way to manage them. */
                                log_debug_errno(r, "Failed to determine '%s' is mount point or not, ignoring: %m", x);
                                continue;
                        }
                        if (r < 0)
                                return r;

                        /* Try to reuse the original flag set */
                        orig_flags = 0;
                        (void) get_mount_flags(x, &orig_flags);
                        orig_flags &= ~MS_RDONLY;

                        if (mount(NULL, x, NULL, (orig_flags & ~flags_mask)|MS_BIND|MS_REMOUNT|new_flags, NULL) < 0)
                                return -errno;

                        log_debug("Remounted %s read-only.", x);
                }
        }
/* Wait until every PID in 'pids' has gone away, or until TIMEOUT_USEC has
 * elapsed. 'mask' must include SIGCHLD so sigtimedwait() can be woken by
 * child exits. Reaps our own children via waitpid() and probes non-children
 * for existence via getpgid(). Removes exited PIDs from the set in place. */
static void wait_for_children(Set *pids, sigset_t *mask) {
        usec_t until;

        assert(mask);

        if (set_isempty(pids))
                return;

        /* Absolute deadline for the whole wait. */
        until = now(CLOCK_MONOTONIC) + TIMEOUT_USEC;
        for (;;) {
                struct timespec ts;
                int k;
                usec_t n;
                void *p;
                Iterator i;

                /* First, let the kernel inform us about killed
                 * children. Most processes will probably be our
                 * children, but some are not (might be our
                 * grandchildren instead...). */
                for (;;) {
                        pid_t pid;

                        pid = waitpid(-1, NULL, WNOHANG);
                        if (pid == 0)
                                break;
                        if (pid < 0) {
                                /* ECHILD just means we have no children left to reap. */
                                if (errno == ECHILD)
                                        break;

                                log_error("waitpid() failed: %m");
                                return;
                        }

                        set_remove(pids, ULONG_TO_PTR(pid));
                }

                /* Now explicitly check who might be remaining, who
                 * might not be our child. */
                SET_FOREACH(p, pids, i) {

                        /* We misuse getpgid as a check whether a
                         * process still exists. */
                        if (getpgid((pid_t) PTR_TO_ULONG(p)) >= 0)
                                continue;

                        /* Any error other than ESRCH leaves the PID in the
                         * set; only "no such process" counts as gone. */
                        if (errno != ESRCH)
                                continue;

                        set_remove(pids, p);
                }

                if (set_isempty(pids))
                        return;

                n = now(CLOCK_MONOTONIC);
                if (n >= until)
                        return;

                /* Sleep until the next SIGCHLD or the remaining deadline. */
                timespec_store(&ts, until - n);
                k = sigtimedwait(mask, NULL, &ts);
                if (k != SIGCHLD) {

                        if (k < 0 && errno != EAGAIN) {
                                log_error("sigtimedwait() failed: %m");
                                return;
                        }

                        if (k >= 0)
                                log_warning("sigtimedwait() returned unexpected signal.");
                }
        }
}
/* Start (or restart) a DNS transaction: gives up after too many attempts,
 * consults the cache (unless the transaction probes/verifies a zone item),
 * applies the LLMNR startup jitter where required, and otherwise emits the
 * query to the network (UDP first, TCP on -EMSGSIZE or for LLMNR reverse
 * lookups).
 *
 * Returns 1 if the transaction is now pending on the network, 0 if it was
 * completed synchronously (cache hit, failure, delayed LLMNR start, ...),
 * negative errno on error. */
int dns_transaction_go(DnsTransaction *t) {
        bool had_stream;
        usec_t ts;
        int r;

        assert(t);

        had_stream = !!t->stream;

        dns_transaction_stop(t);

        /* Fixed typo in the log message: "Excercising" → "Exercising". */
        log_debug("Exercising transaction on scope %s on %s/%s",
                  dns_protocol_to_string(t->scope->protocol),
                  t->scope->link ? t->scope->link->name : "*",
                  t->scope->family == AF_UNSPEC ? "*" : af_to_name(t->scope->family));

        if (t->n_attempts >= TRANSACTION_ATTEMPTS_MAX(t->scope->protocol)) {
                dns_transaction_complete(t, DNS_TRANSACTION_ATTEMPTS_MAX_REACHED);
                return 0;
        }

        if (t->scope->protocol == DNS_PROTOCOL_LLMNR && had_stream) {
                /* If we already tried via a stream, then we don't
                 * retry on LLMNR. See RFC 4795, Section 2.7. */
                dns_transaction_complete(t, DNS_TRANSACTION_ATTEMPTS_MAX_REACHED);
                return 0;
        }

        assert_se(sd_event_now(t->scope->manager->event, clock_boottime_or_monotonic(), &ts) >= 0);

        t->n_attempts++;
        t->start_usec = ts;
        /* Drop any state left over from a previous attempt. */
        t->received = dns_packet_unref(t->received);
        t->cached = dns_answer_unref(t->cached);
        t->cached_rcode = 0;

        /* Check the cache, but only if this transaction is not used
         * for probing or verifying a zone item. */
        if (set_isempty(t->zone_items)) {

                /* Before trying the cache, let's make sure we figured out a
                 * server to use. Should this cause a change of server this
                 * might flush the cache. */
                dns_scope_get_dns_server(t->scope);

                /* Let's then prune all outdated entries */
                dns_cache_prune(&t->scope->cache);

                r = dns_cache_lookup(&t->scope->cache, t->key, &t->cached_rcode, &t->cached);
                if (r < 0)
                        return r;
                if (r > 0) {
                        /* Cache hit: complete synchronously with the cached verdict. */
                        if (t->cached_rcode == DNS_RCODE_SUCCESS)
                                dns_transaction_complete(t, DNS_TRANSACTION_SUCCESS);
                        else
                                dns_transaction_complete(t, DNS_TRANSACTION_FAILURE);
                        return 0;
                }
        }

        if (t->scope->protocol == DNS_PROTOCOL_LLMNR && !t->initial_jitter) {
                usec_t jitter;

                /* RFC 4795 Section 2.7 suggests all queries should be
                 * delayed by a random time from 0 to JITTER_INTERVAL. */

                t->initial_jitter = true;

                random_bytes(&jitter, sizeof(jitter));
                jitter %= LLMNR_JITTER_INTERVAL_USEC;

                r = sd_event_add_time(
                                t->scope->manager->event,
                                &t->timeout_event_source,
                                clock_boottime_or_monotonic(),
                                ts + jitter,
                                LLMNR_JITTER_INTERVAL_USEC,
                                on_transaction_timeout, t);
                if (r < 0)
                        return r;

                /* The delayed start does not count as an attempt. */
                t->n_attempts = 0;
                t->state = DNS_TRANSACTION_PENDING;

                log_debug("Delaying LLMNR transaction for " USEC_FMT "us.", jitter);
                return 0;
        }

        /* Otherwise, we need to ask the network */
        r = dns_transaction_make_packet(t);
        if (r == -EDOM) {
                /* Not the right request to make on this network?
                 * (i.e. an A request made on IPv6 or an AAAA request
                 * made on IPv4, on LLMNR or mDNS.) */
                dns_transaction_complete(t, DNS_TRANSACTION_NO_SERVERS);
                return 0;
        }
        if (r < 0)
                return r;

        if (t->scope->protocol == DNS_PROTOCOL_LLMNR &&
            (dns_name_endswith(DNS_RESOURCE_KEY_NAME(t->key), "in-addr.arpa") > 0 ||
             dns_name_endswith(DNS_RESOURCE_KEY_NAME(t->key), "ip6.arpa") > 0)) {

                /* RFC 4795, Section 2.4. says reverse lookups shall
                 * always be made via TCP on LLMNR */
                r = dns_transaction_open_tcp(t);
        } else {
                /* Try via UDP, and if that fails due to large size try via TCP */
                r = dns_transaction_emit(t);
                if (r == -EMSGSIZE)
                        r = dns_transaction_open_tcp(t);
        }

        if (r == -ESRCH) {
                /* No servers to send this to? */
                dns_transaction_complete(t, DNS_TRANSACTION_NO_SERVERS);
                return 0;
        } else if (r < 0) {
                if (t->scope->protocol != DNS_PROTOCOL_DNS) {
                        dns_transaction_complete(t, DNS_TRANSACTION_RESOURCES);
                        return 0;
                }

                /* Couldn't send? Try immediately again, with a new server */
                dns_transaction_next_dns_server(t);

                return dns_transaction_go(t);
        }

        /* Arm the retransmission timeout for this attempt. */
        r = sd_event_add_time(
                        t->scope->manager->event,
                        &t->timeout_event_source,
                        clock_boottime_or_monotonic(),
                        ts + transaction_get_resend_timeout(t), 0,
                        on_transaction_timeout, t);
        if (r < 0)
                return r;

        t->state = DNS_TRANSACTION_PENDING;
        return 1;
}
/* Returns true if the fd set contains no file descriptors. An FDSet is a
 * thin wrapper around Set, hence simply delegate the emptiness check. */
bool fdset_isempty(FDSet *fds) {
        return set_isempty(MAKE_SET(fds));
}
/* Cross-checks Prioq against Set using heap-allocated struct entries:
 * fills the priority queue with SET_SIZE random values (every fourth one
 * also tracked in the set), removes the tracked quarter via prioq_remove(),
 * then verifies the remaining three quarters pop in non-decreasing order. */
static void test_struct(void) {
        Prioq *q;
        Set *s;
        unsigned previous = 0, i;
        int r;

        /* Fixed seed keeps the test deterministic. */
        srand(0);

        q = prioq_new(test_compare);
        assert_se(q);

        s = set_new(test_hash, test_compare);
        assert_se(s);

        for (i = 0; i < SET_SIZE; i++) {
                struct test *t;

                t = new0(struct test, 1);
                assert_se(t);
                t->value = (unsigned) rand();

                r = prioq_put(q, t, &t->idx);
                assert_se(r >= 0);

                /* Every fourth entry is also recorded in the set, so we can
                 * remove exactly that quarter from the queue later. */
                if (i % 4 == 0) {
                        r = set_consume(s, t);
                        assert_se(r >= 0);
                }
        }

        /* Drain the set, removing each tracked entry from the queue too. */
        for (;;) {
                struct test *t;

                t = set_steal_first(s);
                if (!t)
                        break;

                r = prioq_remove(q, t, &t->idx);
                assert_se(r > 0);

                free(t);
        }

        /* The remaining 3/4 must pop in non-decreasing value order. */
        for (i = 0; i < SET_SIZE * 3 / 4; i++) {
                struct test *t;

                assert_se(prioq_size(q) == (SET_SIZE * 3 / 4) - i);

                t = prioq_pop(q);
                assert_se(t);

                assert_se(previous <= t->value);
                previous = t->value;
                free(t);
        }

        assert_se(prioq_isempty(q));
        prioq_free(q);

        assert_se(set_isempty(s));
        set_free(s);
}