/* Entry point (pre-binlog variant): parse options, bind the listen
 * socket, optionally drop privileges and daemonize, then run the
 * libevent dispatch loop. Exits 111 if the socket cannot be created. */
int main(int argc, char **argv) {
    int r;

    progname = argv[0];
    opts(argc, argv);

    job_init();
    prot_init();

    r = make_server_socket(host_addr, port);
    if (r == -1) twarnx("make_server_socket()"), exit(111);

    /* Privileges are dropped (su) before forking into the background. */
    if (user) su(user);
    if (detach) daemonize();
    event_init();
    set_sig_handlers();
    nudge_fd_limit();

    /* Start accepting connections; h_accept is the libevent callback. */
    unbrake((evh) h_accept);

    /* event_dispatch() is expected to loop forever; reaching the line
     * below means the dispatcher returned unexpectedly. */
    event_dispatch();
    twarnx("got here for some reason");
    return 0;
}
/* Entry point (binlog variant): parse options, lock and replay the
 * binlog, bind the listen socket, then run the libevent loop.
 * Exit codes: 10 = binlog dir lock failure, 111 = socket failure. */
int main(int argc, char **argv) {
    int r, l;
    struct event_base *ev_base;
    struct job binlog_jobs = {};

    progname = argv[0];
    opts(argc, argv);

    /* -b with -d requires an absolute path — presumably because
     * daemonize() changes the working directory; TODO confirm. */
    if (detach && binlog_dir) {
        if (binlog_dir[0] != '/') {
            warnx("The -b option requires an absolute path when used with -d.");
            usage("Path is not absolute", binlog_dir);
        }
    }

    job_init();
    prot_init();

    /* We want to make sure that only one beanstalkd tries to use the binlog
     * directory at a time. So acquire a lock now and never release it. */
    if (binlog_dir) {
        r = binlog_lock();
        if (!r) twarnx("failed to lock binlog dir %s", binlog_dir), exit(10);
    }

    r = make_server_socket(host_addr, port);
    if (r == -1) twarnx("make_server_socket()"), exit(111);
    l = r; /* keep the listen fd for listen() below */

    if (user) su(user);
    ev_base = event_init();
    set_sig_handlers();
    nudge_fd_limit();

    r = listen(l, 1024);
    if (r == -1) twarn("listen()");

    accept_handler = (evh)h_accept;
    unbrake();

    /* Replay the binlog into an empty circular doubly-linked job list. */
    binlog_jobs.prev = binlog_jobs.next = &binlog_jobs;
    binlog_init(&binlog_jobs);
    prot_replay_binlog(&binlog_jobs);

    if (detach) {
        daemonize();
        /* daemonize() forks; libevent requires event_reinit() so the
         * event base stays usable in the child process. */
        event_reinit(ev_base);
    }

    event_dispatch();
    twarnx("event_dispatch error");
    binlog_shutdown();
    return 0;
}
static void rehash() { job *old = all_jobs; size_t old_cap = all_jobs_cap, old_used = all_jobs_used, i; if (cur_prime >= NUM_PRIMES) return; if (hash_table_was_oom) return; all_jobs_cap = primes[++cur_prime]; all_jobs = calloc(all_jobs_cap, sizeof(job)); if (!all_jobs) { twarnx("Failed to allocate %d new hash buckets", all_jobs_cap); hash_table_was_oom = 1; --cur_prime; all_jobs = old; all_jobs_cap = old_cap; all_jobs_used = old_used; return; } all_jobs_used = 0; for (i = 0; i < old_cap; i++) { while (old[i]) { job j = old[i]; old[i] = j->ht_next; j->ht_next = NULL; store_job(j); } } }
/* Create a job with an explicit id (or, when id is 0, the next value of
 * the next_id counter), register it in the global job hash table, and
 * take a reference on its tube. Returns NULL on allocation failure. */
job make_job_with_id(unsigned int pri, unsigned int delay, unsigned int ttr,
                     int body_size, tube tube, unsigned long long id)
{
    job j = allocate_job(body_size);

    if (!j) {
        twarnx("OOM");
        return NULL;
    }

    if (id == 0) {
        j->id = next_id++;
    } else {
        j->id = id;
        /* Keep the counter ahead of any explicitly assigned id. */
        if (next_id <= id) next_id = id + 1;
    }

    j->pri = pri;
    j->delay = delay;
    j->ttr = ttr;

    store_job(j);
    TUBE_ASSIGN(j->tube, tube);

    return j;
}
/* Allocate the initial all-jobs hash table (all_jobs_cap buckets).
 * On failure this only warns; all_jobs stays NULL, so callers must
 * tolerate an empty table. */
void job_init() {
    all_jobs = calloc(all_jobs_cap, sizeof(job));
    if (!all_jobs) {
        /* %zu: all_jobs_cap is a size_t count (see the size_t copy taken
         * in rehash()); %d was a format/type mismatch (UB). */
        twarnx("Failed to allocate %zu hash buckets", all_jobs_cap);
    }
}
/* Release one reference to tube t; frees the tube when the count drops
 * below one. Warns and does nothing if the count is already below one. */
void tube_dref(tube t) {
    if (!t) return;

    if (t->refs < 1) {
        twarnx("refs is zero for tube: %s", t->name);
        return;
    }

    if (--t->refs < 1) tube_free(t);
}
/* Entry point (WAL variant): parse options, bind the listen socket,
 * lock and replay the write-ahead log if enabled, then hand off to
 * srv(). Exit codes: 10 = wal dir lock failure, 111 = socket failure. */
int main(int argc, char **argv) {
    int r;
    Srv s = {};
    s.wal.filesz = Filesizedef;
    struct job list = {};

    progname = argv[0];
    opts(argc, argv, &s.wal);

    if (verbose) {
        printf("pid %d\n", getpid());
    }

    r = make_server_socket(host_addr, port);
    if (r == -1) twarnx("make_server_socket()"), exit(111);
    s.sock.fd = r;

    prot_init();

    if (user) su(user);
    set_sig_handlers();

    if (s.wal.use) {
        // We want to make sure that only one beanstalkd tries
        // to use the wal directory at a time. So acquire a lock
        // now and never release it.
        if (!waldirlock(&s.wal)) {
            twarnx("failed to lock wal dir %s", s.wal.dir);
            exit(10);
        }

        /* Replay the WAL into an empty circular doubly-linked list. */
        list.prev = list.next = &list;
        walinit(&s.wal, &list);
        prot_replay(&s, &list);
    }

    srv(&s);
    return 0;
}
/* Switch to the given user: look up the passwd entry, then drop group
 * and user privileges. Exits with a distinct code on each failure
 * (32 = lookup error, 33 = no such user, 34 = setgid/setuid failed). */
static void su(const char *user) {
    struct passwd *pwent;

    errno = 0;
    pwent = getpwnam(user);
    if (errno) {
        twarn("getpwnam(\"%s\")", user);
        exit(32);
    }
    if (!pwent) {
        twarnx("getpwnam(\"%s\"): no such user", user);
        exit(33);
    }

    /* Group must be changed first; it cannot be changed after setuid. */
    if (setgid(pwent->pw_gid) == -1) {
        twarn("setgid(%d \"%s\")", pwent->pw_gid, user);
        exit(34);
    }
    if (setuid(pwent->pw_uid) == -1) {
        twarn("setuid(%d \"%s\")", pwent->pw_uid, user);
        exit(34);
    }
}
/* Duplicate job j, including its body. The copy owns a fresh tube
 * reference, holds no binlog reference, and is not linked into any list.
 * Returns NULL on allocation failure. */
job job_copy(job j) {
    job n;

    if (!j) return NULL;

    n = malloc(sizeof(struct job) + j->body_size);
    if (!n) return twarnx("OOM"), (job) 0;

    memcpy(n, j, sizeof(struct job) + j->body_size);
    n->next = n->prev = n; /* not in a linked list */

    n->binlog = NULL; /* copies do not have refcnt on the binlog */

    n->tube = 0; /* Don't use memcpy for the tube, which we must refcount. */
    TUBE_ASSIGN(n->tube, j->tube);

    /* BUG FIX: mark this job as a copy so it can be appropriately freed
     * later on — matching the other job_copy variant in this file, which
     * documents the marker as required. */
    n->state = JOB_STATE_COPY;

    return n;
}
/* Allocate a job with room for body_size bytes of body and initialize
 * every bookkeeping field to its default. The new job is unlinked, has
 * no tube or binlog reference, and starts in JOB_STATE_INVALID.
 * Returns NULL on allocation failure. */
job allocate_job(int body_size) {
    job jb = malloc(sizeof(struct job) + body_size);

    if (!jb) {
        twarnx("OOM");
        return NULL;
    }

    jb->id = 0;
    jb->state = JOB_STATE_INVALID;
    jb->body_size = body_size;
    jb->creation = time(NULL);

    /* All counters start at zero. */
    jb->timeout_ct = jb->release_ct = jb->bury_ct = jb->kick_ct = 0;
    jb->heap_index = 0;
    jb->reserved_binlog_space = 0;

    /* Not in a linked list, and no external references yet. */
    jb->next = jb->prev = jb;
    jb->ht_next = NULL;
    jb->tube = NULL;
    jb->binlog = NULL;

    return jb;
}
tube make_tube(const char *name) { tube t; t = malloc(sizeof(struct tube)); if (!t) return NULL; t->refs = 0; t->name[MAX_TUBE_NAME_LEN - 1] = '\0'; strncpy(t->name, name, MAX_TUBE_NAME_LEN - 1); if (t->name[MAX_TUBE_NAME_LEN - 1] != '\0') twarnx("truncating tube name"); t->ready.cmp = job_pri_cmp; t->delay.cmp = job_delay_cmp; t->ready.rec = job_setheappos; t->delay.rec = job_setheappos; t->buried = (struct job) { }; t->buried.prev = t->buried.next = &t->buried; ms_init(&t->waiting, NULL, NULL); t->stat = (struct stats) {0, 0, 0, 0, 0}; t->using_ct = t->watching_ct = 0; t->deadline_at = t->pause = 0; return t; } static void tube_free(tube t) { prot_remove_tube(t); free(t->ready.data); free(t->delay.data); ms_clear(&t->waiting); free(t); }
/* Duplicate job j, including its body. The duplicate owns a fresh tube
 * reference, holds no binlog reference, is not linked into any list, and
 * is marked as a copy so it can be appropriately freed later on.
 * Returns NULL on allocation failure. */
job job_copy(job j) {
    size_t total;
    job dup;

    if (!j) return NULL;

    total = sizeof(struct job) + j->body_size;
    dup = malloc(total);
    if (!dup) {
        twarnx("OOM");
        return (job) 0;
    }

    memcpy(dup, j, total);

    /* Not in a linked list. */
    dup->next = dup->prev = dup;

    /* Copies do not have refcnt on the binlog. */
    dup->binlog = NULL;

    /* Don't use memcpy for the tube, which we must refcount. */
    dup->tube = 0;
    TUBE_ASSIGN(dup->tube, j->tube);

    /* Mark this job as a copy so it can be appropriately freed later on. */
    dup->state = JOB_STATE_COPY;

    return dup;
}
/* Create a non-blocking TCP listen socket for host:port, or adopt a
 * listen fd inherited from systemd if one was passed. Tries each address
 * from getaddrinfo() until one binds and listens. Returns the listening
 * fd, or -1 on failure. */
int make_server_socket(char *host, char *port)
{
    int fd = -1, flags, r;
    struct linger linger = {0, 0};
    struct addrinfo *airoot, *ai, hints;

    /* See if we got a listen fd from systemd. If so, all socket options etc
     * are already set, so we check that the fd is a TCP listen socket and
     * return. */
    r = sd_listen_fds(1);
    if (r < 0) {
        return twarn("sd_listen_fds"), -1;
    }
    if (r > 0) {
        if (r > 1) {
            twarnx("inherited more than one listen socket;"
                   " ignoring all but the first");
            r = 1;
        }
        fd = SD_LISTEN_FDS_START;
        r = sd_is_socket_inet(fd, 0, SOCK_STREAM, 1, 0);
        if (r < 0) {
            errno = -r;
            twarn("sd_is_socket_inet");
            return -1;
        }
        if (!r) {
            twarnx("inherited fd is not a TCP listen socket");
            return -1;
        }
        return fd;
    }

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = PF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_PASSIVE;

    /* BUG FIX: getaddrinfo() returns a nonzero EAI_* code on failure, not
     * -1, and does not set errno; the old `r == -1` test missed every
     * failure. Report the error via gai_strerror() instead. */
    r = getaddrinfo(host, port, &hints, &airoot);
    if (r != 0) return twarnx("getaddrinfo(): %s", gai_strerror(r)), -1;

    for (ai = airoot; ai; ai = ai->ai_next) {
        fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
        if (fd == -1) {
            twarn("socket()");
            continue;
        }

        /* The listen socket must be non-blocking for the event loop. */
        flags = fcntl(fd, F_GETFL, 0);
        if (flags < 0) {
            twarn("getting flags");
            close(fd);
            continue;
        }
        r = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
        if (r == -1) {
            twarn("setting O_NONBLOCK");
            close(fd);
            continue;
        }

        flags = 1;
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &flags, sizeof flags);
        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &flags, sizeof flags);
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &linger, sizeof linger);
        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &flags, sizeof flags);

        if (verbose) {
            char hbuf[NI_MAXHOST], pbuf[NI_MAXSERV], *h = host, *p = port;
            r = getnameinfo(ai->ai_addr, ai->ai_addrlen,
                            hbuf, sizeof hbuf,
                            pbuf, sizeof pbuf,
                            NI_NUMERICHOST | NI_NUMERICSERV);
            if (!r) {
                h = hbuf;
                p = pbuf;
            }
            if (ai->ai_family == AF_INET6) {
                printf("bind %d [%s]:%s\n", fd, h, p);
            } else {
                printf("bind %d %s:%s\n", fd, h, p);
            }
        }

        r = bind(fd, ai->ai_addr, ai->ai_addrlen);
        if (r == -1) {
            twarn("bind()");
            close(fd);
            continue;
        }

        r = listen(fd, 1024);
        if (r == -1) {
            twarn("listen()");
            close(fd);
            continue;
        }

        break;
    }

    freeaddrinfo(airoot);

    /* Exhausted every address without a successful listen. */
    if (ai == NULL) fd = -1;

    return fd;
}