* Implement search in an external script. The results are streamed * back into a local listing. */ #include <assert.h> #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <sys/wait.h> #include "debug.h" #include "excrate.h" #include "rig.h" #include "status.h" static struct list excrates = LIST_INIT(excrates); static int excrate_init(struct excrate *e, const char *script, const char *search, struct listing *storage) { pid_t pid; fprintf(stderr, "External scan '%s'...\n", search); pid = fork_pipe_nb(&e->fd, script, "scan", search, NULL); if (pid == -1) return -1; e->pid = pid; e->pe = NULL; e->terminated = false;
/*
 * Initialise the DHCPv6 timer subsystem: empty the global timer list
 * and push the sentinel (time of the nearest pending timer) out to
 * tm_max, so the first timer actually registered always becomes the
 * nearest deadline.
 *
 * Fix: declare as (void) — an empty parameter list () is an obsolete
 * unprototyped declaration in C and disables argument checking.
 */
void dhcp6_timer_init(void)
{
	LIST_INIT(&timer_head);
	tm_sentinel = tm_max;	/* no timers pending yet */
}
int main(int argc, char *argv[]) { int ch; int mode = -1; int which = -1; char *config = NULL; struct userconf *cnf; static const char *opts[W_NUM][M_NUM] = { { /* user */ "V:C:qn:u:c:d:e:p:g:G:mM:k:s:oL:i:w:h:H:Db:NPy:Y", "V:C:qn:u:rY", "V:C:qn:u:c:d:e:p:g:G:mM:l:k:s:w:L:h:H:FNPY", "V:C:qn:u:FPa7", "V:C:q", "V:C:q", "V:C:q" }, { /* grp */ "V:C:qn:g:h:H:M:opNPY", "V:C:qn:g:Y", "V:C:qn:d:g:l:h:H:FM:m:NPY", "V:C:qn:g:FPa", "V:C:q" } }; static int (*funcs[W_NUM]) (struct userconf * _cnf, int _mode, struct cargs * _args) = { /* Request handlers */ pw_user, pw_group }; LIST_INIT(&arglist); (void)setlocale(LC_ALL, ""); /* * Break off the first couple of words to determine what exactly * we're being asked to do */ while (argc > 1) { int tmp; if (*argv[1] == '-') { /* * Special case, allow pw -V<dir> <operation> [args] for scripts etc. */ if (argv[1][1] == 'V') { optarg = &argv[1][2]; if (*optarg == '\0') { optarg = argv[2]; ++argv; --argc; } addarg(&arglist, 'V', optarg); } else break; } else if (mode == -1 && (tmp = getindex(Modes, argv[1])) != -1) mode = tmp; else if (which == -1 && (tmp = getindex(Which, argv[1])) != -1) which = tmp; else if ((mode == -1 && which == -1) && ((tmp = getindex(Combo1, argv[1])) != -1 || (tmp = getindex(Combo2, argv[1])) != -1)) { which = tmp / M_NUM; mode = tmp % M_NUM; } else if (strcmp(argv[1], "help") == 0 && argv[2] == NULL) cmdhelp(mode, which); else if (which != -1 && mode != -1) addarg(&arglist, 'n', argv[1]); else errx(EX_USAGE, "unknown keyword `%s'", argv[1]); ++argv; --argc; } /* * Bail out unless the user is specific! */ if (mode == -1 || which == -1) cmdhelp(mode, which); /* * We know which mode we're in and what we're about to do, so now * let's dispatch the remaining command line args in a genric way. 
*/ optarg = NULL; while ((ch = getopt(argc, argv, opts[which][mode])) != -1) { if (ch == '?') errx(EX_USAGE, "unknown switch"); else addarg(&arglist, ch, optarg); optarg = NULL; } /* * Must be root to attempt an update */ if (geteuid() != 0 && mode != M_PRINT && mode != M_NEXT && getarg(&arglist, 'N')==NULL) errx(EX_NOPERM, "you must be root to run this program"); /* * We should immediately look for the -q 'quiet' switch so that we * don't bother with extraneous errors */ if (getarg(&arglist, 'q') != NULL) freopen(_PATH_DEVNULL, "w", stderr); /* * Set our base working path if not overridden */ config = getarg(&arglist, 'C') ? getarg(&arglist, 'C')->val : NULL; if (getarg(&arglist, 'V') != NULL) { char * etcpath = getarg(&arglist, 'V')->val; if (*etcpath) { if (config == NULL) { /* Only override config location if -C not specified */ config = malloc(MAXPATHLEN); snprintf(config, MAXPATHLEN, "%s/pw.conf", etcpath); } memcpy(&PWF, &VPWF, sizeof PWF); setpwdir(etcpath); setgrdir(etcpath); } } /* * Now, let's do the common initialisation */ cnf = read_userconfig(config); ch = funcs[which] (cnf, mode, &arglist); /* * If everything went ok, and we've been asked to update * the NIS maps, then do it now */ if (ch == EXIT_SUCCESS && getarg(&arglist, 'Y') != NULL) { pid_t pid; fflush(NULL); if (chdir(_PATH_YP) == -1) warn("chdir(" _PATH_YP ")"); else if ((pid = fork()) == -1) warn("fork()"); else if (pid == 0) { /* Is make anywhere else? */ execlp("/usr/bin/make", "make", (char *)NULL); _exit(1); } else { int i; waitpid(pid, &i, 0); if ((i = WEXITSTATUS(i)) != 0) errx(ch, "make exited with status %d", i); else pw_log(cnf, mode, which, "NIS maps updated"); } } return ch; }
struct dispex_dynamic_data_t { DWORD buf_size; DWORD prop_cnt; dynamic_prop_t *props; func_disp_t **func_disps; }; #define DISPID_DYNPROP_0 0x50000000 #define DISPID_DYNPROP_MAX 0x5fffffff #define FDEX_VERSION_MASK 0xf0000000 static ITypeLib *typelib; static ITypeInfo *typeinfos[LAST_tid]; static struct list dispex_data_list = LIST_INIT(dispex_data_list); static REFIID tid_ids[] = { #define XIID(iface) &IID_ ## iface, #define XDIID(iface) &DIID_ ## iface, TID_LIST #undef XIID #undef XDIID }; static HRESULT load_typelib(void) { HRESULT hres; ITypeLib *tl; hres = LoadRegTypeLib(&LIBID_MSHTML, 4, 0, LOCALE_SYSTEM_DEFAULT, &tl);
/* process HTTP or SSDP requests */ int main(int argc, char **argv) { int ret, i; int sudp = -1, shttpl = -1; int smonitor = -1; LIST_HEAD(httplisthead, upnphttp) upnphttphead; struct upnphttp * e = 0; struct upnphttp * next; fd_set readset; /* for select() */ fd_set writeset; struct timeval timeout, timeofday, lastnotifytime = {0, 0}; time_t lastupdatetime = 0; int max_fd = -1; int last_changecnt = 0; pid_t scanner_pid = 0; pthread_t inotify_thread = 0; #ifdef TIVO_SUPPORT uint8_t beacon_interval = 5; int sbeacon = -1; struct sockaddr_in tivo_bcast; struct timeval lastbeacontime = {0, 0}; #endif for (i = 0; i < L_MAX; i++) log_level[i] = E_WARN; #ifdef ENABLE_NLS setlocale(LC_MESSAGES, ""); setlocale(LC_CTYPE, "en_US.utf8"); DPRINTF(E_DEBUG, L_GENERAL, "Using locale dir %s\n", bindtextdomain("minidlna", getenv("TEXTDOMAINDIR"))); textdomain("minidlna"); #endif ret = init(argc, argv); if (ret != 0) return 1; DPRINTF(E_WARN, L_GENERAL, "Starting " SERVER_NAME " version " MINIDLNA_VERSION ".\n"); if (sqlite3_libversion_number() < 3005001) { DPRINTF(E_WARN, L_GENERAL, "SQLite library is old. Please use version 3.5.1 or newer.\n"); } LIST_INIT(&upnphttphead); ret = open_db(NULL); if (ret == 0) { updateID = sql_get_int_field(db, "SELECT VALUE from SETTINGS where KEY = 'UPDATE_ID'"); if (updateID == -1) ret = -1; } check_db(db, ret, &scanner_pid); signal(SIGCHLD, &sigchld); #ifdef HAVE_INOTIFY if( GETFLAG(INOTIFY_MASK) ) { if (!sqlite3_threadsafe() || sqlite3_libversion_number() < 3005001) DPRINTF(E_ERROR, L_GENERAL, "SQLite library is not threadsafe! " "Inotify will be disabled.\n"); else if (pthread_create(&inotify_thread, NULL, start_inotify, NULL) != 0) DPRINTF(E_FATAL, L_GENERAL, "ERROR: pthread_create() failed for start_inotify. EXITING\n"); } #endif smonitor = OpenAndConfMonitorSocket(); sudp = OpenAndConfSSDPReceiveSocket(); if (sudp < 0) { DPRINTF(E_INFO, L_GENERAL, "Failed to open socket for receiving SSDP. 
Trying to use MiniSSDPd\n"); if (SubmitServicesToMiniSSDPD(lan_addr[0].str, runtime_vars.port) < 0) DPRINTF(E_FATAL, L_GENERAL, "Failed to connect to MiniSSDPd. EXITING"); } /* open socket for HTTP connections. */ shttpl = OpenAndConfHTTPSocket(runtime_vars.port); if (shttpl < 0) DPRINTF(E_FATAL, L_GENERAL, "Failed to open socket for HTTP. EXITING\n"); DPRINTF(E_WARN, L_GENERAL, "HTTP listening on port %d\n", runtime_vars.port); #ifdef TIVO_SUPPORT if (GETFLAG(TIVO_MASK)) { DPRINTF(E_WARN, L_GENERAL, "TiVo support is enabled.\n"); /* Add TiVo-specific randomize function to sqlite */ ret = sqlite3_create_function(db, "tivorandom", 1, SQLITE_UTF8, NULL, &TiVoRandomSeedFunc, NULL, NULL); if (ret != SQLITE_OK) DPRINTF(E_ERROR, L_TIVO, "ERROR: Failed to add sqlite randomize function for TiVo!\n"); /* open socket for sending Tivo notifications */ sbeacon = OpenAndConfTivoBeaconSocket(); if(sbeacon < 0) DPRINTF(E_FATAL, L_GENERAL, "Failed to open sockets for sending Tivo beacon notify " "messages. EXITING\n"); tivo_bcast.sin_family = AF_INET; tivo_bcast.sin_addr.s_addr = htonl(getBcastAddress()); tivo_bcast.sin_port = htons(2190); } else sbeacon = -1; #endif SendSSDPGoodbyes(); /* main loop */ while (!quitting) { /* Check if we need to send SSDP NOTIFY messages and do it if * needed */ if (gettimeofday(&timeofday, 0) < 0) { DPRINTF(E_ERROR, L_GENERAL, "gettimeofday(): %s\n", strerror(errno)); timeout.tv_sec = runtime_vars.notify_interval; timeout.tv_usec = 0; } else { /* the comparison is not very precise but who cares ? 
*/ if (timeofday.tv_sec >= (lastnotifytime.tv_sec + runtime_vars.notify_interval)) { DPRINTF(E_DEBUG, L_SSDP, "Sending SSDP notifies\n"); for (i = 0; i < n_lan_addr; i++) { SendSSDPNotifies(lan_addr[i].snotify, lan_addr[i].str, runtime_vars.port, runtime_vars.notify_interval); } memcpy(&lastnotifytime, &timeofday, sizeof(struct timeval)); timeout.tv_sec = runtime_vars.notify_interval; timeout.tv_usec = 0; } else { timeout.tv_sec = lastnotifytime.tv_sec + runtime_vars.notify_interval - timeofday.tv_sec; if (timeofday.tv_usec > lastnotifytime.tv_usec) { timeout.tv_usec = 1000000 + lastnotifytime.tv_usec - timeofday.tv_usec; timeout.tv_sec--; } else timeout.tv_usec = lastnotifytime.tv_usec - timeofday.tv_usec; } #ifdef TIVO_SUPPORT if (GETFLAG(TIVO_MASK)) { if (timeofday.tv_sec >= (lastbeacontime.tv_sec + beacon_interval)) { sendBeaconMessage(sbeacon, &tivo_bcast, sizeof(struct sockaddr_in), 1); memcpy(&lastbeacontime, &timeofday, sizeof(struct timeval)); if (timeout.tv_sec > beacon_interval) { timeout.tv_sec = beacon_interval; timeout.tv_usec = 0; } /* Beacons should be sent every 5 seconds or so for the first minute, * then every minute or so thereafter. 
*/ if (beacon_interval == 5 && (timeofday.tv_sec - startup_time) > 60) beacon_interval = 60; } else if (timeout.tv_sec > (lastbeacontime.tv_sec + beacon_interval + 1 - timeofday.tv_sec)) timeout.tv_sec = lastbeacontime.tv_sec + beacon_interval - timeofday.tv_sec; } #endif } if (scanning) { if (!scanner_pid || kill(scanner_pid, 0) != 0) { scanning = 0; updateID++; } } /* select open sockets (SSDP, HTTP listen, and all HTTP soap sockets) */ FD_ZERO(&readset); if (sudp >= 0) { FD_SET(sudp, &readset); max_fd = MAX(max_fd, sudp); } if (shttpl >= 0) { FD_SET(shttpl, &readset); max_fd = MAX(max_fd, shttpl); } #ifdef TIVO_SUPPORT if (sbeacon >= 0) { FD_SET(sbeacon, &readset); max_fd = MAX(max_fd, sbeacon); } #endif if (smonitor >= 0) { FD_SET(smonitor, &readset); max_fd = MAX(max_fd, smonitor); } i = 0; /* active HTTP connections count */ for (e = upnphttphead.lh_first; e != NULL; e = e->entries.le_next) { if ((e->socket >= 0) && (e->state <= 2)) { FD_SET(e->socket, &readset); max_fd = MAX(max_fd, e->socket); i++; } } #ifdef DEBUG /* for debug */ if (i > 1) DPRINTF(E_DEBUG, L_GENERAL, "%d active incoming HTTP connections\n", i); #endif FD_ZERO(&writeset); upnpevents_selectfds(&readset, &writeset, &max_fd); ret = select(max_fd+1, &readset, &writeset, 0, &timeout); if (ret < 0) { if(quitting) goto shutdown; if(errno == EINTR) continue; DPRINTF(E_ERROR, L_GENERAL, "select(all): %s\n", strerror(errno)); DPRINTF(E_FATAL, L_GENERAL, "Failed to select open sockets. 
EXITING\n"); } upnpevents_processfds(&readset, &writeset); /* process SSDP packets */ if (sudp >= 0 && FD_ISSET(sudp, &readset)) { /*DPRINTF(E_DEBUG, L_GENERAL, "Received UDP Packet\n");*/ ProcessSSDPRequest(sudp, (unsigned short)runtime_vars.port); } #ifdef TIVO_SUPPORT if (sbeacon >= 0 && FD_ISSET(sbeacon, &readset)) { /*DPRINTF(E_DEBUG, L_GENERAL, "Received UDP Packet\n");*/ ProcessTiVoBeacon(sbeacon); } #endif if (smonitor >= 0 && FD_ISSET(smonitor, &readset)) { ProcessMonitorEvent(smonitor); } /* increment SystemUpdateID if the content database has changed, * and if there is an active HTTP connection, at most once every 2 seconds */ if (i && (timeofday.tv_sec >= (lastupdatetime + 2))) { if (scanning || sqlite3_total_changes(db) != last_changecnt) { updateID++; last_changecnt = sqlite3_total_changes(db); upnp_event_var_change_notify(EContentDirectory); lastupdatetime = timeofday.tv_sec; } } /* process active HTTP connections */ for (e = upnphttphead.lh_first; e != NULL; e = e->entries.le_next) { if ((e->socket >= 0) && (e->state <= 2) && (FD_ISSET(e->socket, &readset))) Process_upnphttp(e); } /* process incoming HTTP connections */ if (shttpl >= 0 && FD_ISSET(shttpl, &readset)) { int shttp; socklen_t clientnamelen; struct sockaddr_in clientname; clientnamelen = sizeof(struct sockaddr_in); shttp = accept(shttpl, (struct sockaddr *)&clientname, &clientnamelen); if (shttp<0) { DPRINTF(E_ERROR, L_GENERAL, "accept(http): %s\n", strerror(errno)); } else { struct upnphttp * tmp = 0; DPRINTF(E_DEBUG, L_GENERAL, "HTTP connection from %s:%d\n", inet_ntoa(clientname.sin_addr), ntohs(clientname.sin_port) ); /*if (fcntl(shttp, F_SETFL, O_NONBLOCK) < 0) { DPRINTF(E_ERROR, L_GENERAL, "fcntl F_SETFL, O_NONBLOCK\n"); }*/ /* Create a new upnphttp object and add it to * the active upnphttp object list */ tmp = New_upnphttp(shttp); if (tmp) { tmp->clientaddr = clientname.sin_addr; LIST_INSERT_HEAD(&upnphttphead, tmp, entries); } else { DPRINTF(E_ERROR, L_GENERAL, "New_upnphttp() 
failed\n"); close(shttp); } } } /* delete finished HTTP connections */ for (e = upnphttphead.lh_first; e != NULL; e = next) { next = e->entries.le_next; if(e->state >= 100) { LIST_REMOVE(e, entries); Delete_upnphttp(e); } } } shutdown: /* kill the scanner */ if (scanning && scanner_pid) kill(scanner_pid, 9); /* close out open sockets */ while (upnphttphead.lh_first != NULL) { e = upnphttphead.lh_first; LIST_REMOVE(e, entries); Delete_upnphttp(e); } if (sudp >= 0) close(sudp); if (shttpl >= 0) close(shttpl); #ifdef TIVO_SUPPORT if (sbeacon >= 0) close(sbeacon); #endif if (SendSSDPGoodbyes() < 0) DPRINTF(E_ERROR, L_GENERAL, "Failed to broadcast good-bye notifications\n"); for (i = 0; i < n_lan_addr; i++) { close(lan_addr[i].snotify); } if (inotify_thread) pthread_join(inotify_thread, NULL); sql_exec(db, "UPDATE SETTINGS set VALUE = '%u' where KEY = 'UPDATE_ID'", updateID); sqlite3_close(db); upnpevents_removeSubscribers(); if (pidfilename && unlink(pidfilename) < 0) DPRINTF(E_ERROR, L_GENERAL, "Failed to remove pidfile %s: %s\n", pidfilename, strerror(errno)); log_close(); freeoptions(); exit(EXIT_SUCCESS); }
#include "gdi_private.h" #include "wine/unicode.h" #include "wine/list.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(driver); struct graphics_driver { struct list entry; HMODULE module; /* module handle */ DC_FUNCTIONS funcs; }; static struct list drivers = LIST_INIT( drivers ); static struct graphics_driver *display_driver; static CRITICAL_SECTION driver_section; static CRITICAL_SECTION_DEBUG critsect_debug = { 0, 0, &driver_section, { &critsect_debug.ProcessLocksList, &critsect_debug.ProcessLocksList }, 0, 0, { (DWORD_PTR)(__FILE__ ": driver_section") } }; static CRITICAL_SECTION driver_section = { &critsect_debug, -1, 0, 0, 0, 0 }; /********************************************************************** * create_driver * * Allocate and fill the driver structure for a given module.
/*
 * One-time attach routine for the gre(4) pseudo-device.
 *
 * NOTE(review): `n` is unused here — presumably the pseudo-device
 * count from the kernel config, passed by convention to *attach
 * routines; interfaces are instead created on demand via the cloner.
 * Confirm against the pseudo-device attach table.
 */
void greattach(int n)
{
	LIST_INIT(&gre_softc_list);	/* no softc instances allocated yet */
	if_clone_attach(&gre_cloner);	/* register the clone handler so
					 * units can be created on demand */
}
/*
 * Prepare the DHCPv6 client's timer state for first use.
 *
 * The sentinel — the wake-up time of the nearest pending timer — is
 * parked at ULLONG_MAX to mean "no timer armed", and the timer list
 * starts out empty.
 */
void dhcp6_timer_init(void)
{
	client6_config.tm_sentinel = ULLONG_MAX;
	LIST_INIT(&client6_config.timer_head);
}
/* allocates a new hash */ scr_hash* scr_hash_new() { scr_hash* hash = (scr_hash*) SCR_MALLOC(sizeof(scr_hash)); LIST_INIT(hash); return hash; }
/* * General fork call. Note that another LWP in the process may call exec() * or exit() while we are forking. It's safe to continue here, because * neither operation will complete until all LWPs have exited the process. */ int fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize, void (*func)(void *), void *arg, register_t *retval, struct proc **rnewprocp) { struct proc *p1, *p2, *parent; struct plimit *p1_lim; uid_t uid; struct lwp *l2; int count; vaddr_t uaddr; int tnprocs; int tracefork; int error = 0; p1 = l1->l_proc; uid = kauth_cred_getuid(l1->l_cred); tnprocs = atomic_inc_uint_nv(&nprocs); /* * Although process entries are dynamically created, we still keep * a global limit on the maximum number we will create. */ if (__predict_false(tnprocs >= maxproc)) error = -1; else error = kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL); if (error) { static struct timeval lasttfm; atomic_dec_uint(&nprocs); if (ratecheck(&lasttfm, &fork_tfmrate)) tablefull("proc", "increase kern.maxproc or NPROC"); if (forkfsleep) kpause("forkmx", false, forkfsleep, NULL); return EAGAIN; } /* * Enforce limits. */ count = chgproccnt(uid, 1); if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) { if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT, p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS), &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0) { (void)chgproccnt(uid, -1); atomic_dec_uint(&nprocs); if (forkfsleep) kpause("forkulim", false, forkfsleep, NULL); return EAGAIN; } } /* * Allocate virtual address space for the U-area now, while it * is still easy to abort the fork operation if we're out of * kernel virtual address space. */ uaddr = uvm_uarea_alloc(); if (__predict_false(uaddr == 0)) { (void)chgproccnt(uid, -1); atomic_dec_uint(&nprocs); return ENOMEM; } /* * We are now committed to the fork. From here on, we may * block on resources, but resource allocation may NOT fail. 
*/ /* Allocate new proc. */ p2 = proc_alloc(); /* * Make a proc table entry for the new process. * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ memset(&p2->p_startzero, 0, (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero)); memcpy(&p2->p_startcopy, &p1->p_startcopy, (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy)); TAILQ_INIT(&p2->p_sigpend.sp_info); LIST_INIT(&p2->p_lwps); LIST_INIT(&p2->p_sigwaiters); /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. * Inherit flags we want to keep. The flags related to SIGCHLD * handling are important in order to keep a consistent behaviour * for the child after the fork. If we are a 32-bit process, the * child will be too. */ p2->p_flag = p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32); p2->p_emul = p1->p_emul; p2->p_execsw = p1->p_execsw; if (flags & FORK_SYSTEM) { /* * Mark it as a system process. Set P_NOCLDWAIT so that * children are reparented to init(8) when they exit. * init(8) can easily wait them out for us. */ p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT); } mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH); mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE); rw_init(&p2->p_reflock); cv_init(&p2->p_waitcv, "wait"); cv_init(&p2->p_lwpcv, "lwpwait"); /* * Share a lock between the processes if they are to share signal * state: we must synchronize access to it. 
*/ if (flags & FORK_SHARESIGS) { p2->p_lock = p1->p_lock; mutex_obj_hold(p1->p_lock); } else p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); kauth_proc_fork(p1, p2); p2->p_raslist = NULL; #if defined(__HAVE_RAS) ras_fork(p1, p2); #endif /* bump references to the text vnode (for procfs) */ p2->p_textvp = p1->p_textvp; if (p2->p_textvp) vref(p2->p_textvp); if (flags & FORK_SHAREFILES) fd_share(p2); else if (flags & FORK_CLEANFILES) p2->p_fd = fd_init(NULL); else p2->p_fd = fd_copy(); /* XXX racy */ p2->p_mqueue_cnt = p1->p_mqueue_cnt; if (flags & FORK_SHARECWD) cwdshare(p2); else p2->p_cwdi = cwdinit(); /* * Note: p_limit (rlimit stuff) is copy-on-write, so normally * we just need increase pl_refcnt. */ p1_lim = p1->p_limit; if (!p1_lim->pl_writeable) { lim_addref(p1_lim); p2->p_limit = p1_lim; } else { p2->p_limit = lim_copy(p1_lim); } if (flags & FORK_PPWAIT) { /* Mark ourselves as waiting for a child. */ l1->l_pflag |= LP_VFORKWAIT; p2->p_lflag = PL_PPWAIT; p2->p_vforklwp = l1; } else { p2->p_lflag = 0; } p2->p_sflag = 0; p2->p_slflag = 0; parent = (flags & FORK_NOWAIT) ? initproc : p1; p2->p_pptr = parent; p2->p_ppid = parent->p_pid; LIST_INIT(&p2->p_children); p2->p_aio = NULL; #ifdef KTRACE /* * Copy traceflag and tracefile if enabled. * If not inherited, these were zeroed above. */ if (p1->p_traceflag & KTRFAC_INHERIT) { mutex_enter(&ktrace_lock); p2->p_traceflag = p1->p_traceflag; if ((p2->p_tracep = p1->p_tracep) != NULL) ktradref(p2); mutex_exit(&ktrace_lock); } #endif /* * Create signal actions for the child process. */ p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS); mutex_enter(p1->p_lock); p2->p_sflag |= (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP)); sched_proc_fork(p1, p2); mutex_exit(p1->p_lock); p2->p_stflag = p1->p_stflag; /* * p_stats. * Copy parts of p_stats, and zero out the rest. */ p2->p_stats = pstatscopy(p1->p_stats); /* * Set up the new process address space. */ uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? 
true : false); /* * Finish creating the child process. * It will return through a different path later. */ lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0, stack, stacksize, (func != NULL) ? func : child_return, arg, &l2, l1->l_class); /* * Inherit l_private from the parent. * Note that we cannot use lwp_setprivate() here since that * also sets the CPU TLS register, which is incorrect if the * process has changed that without letting the kernel know. */ l2->l_private = l1->l_private; /* * If emulation has a process fork hook, call it now. */ if (p2->p_emul->e_proc_fork) (*p2->p_emul->e_proc_fork)(p2, l1, flags); /* * ...and finally, any other random fork hooks that subsystems * might have registered. */ doforkhooks(p2, p1); SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0); /* * It's now safe for the scheduler and other processes to see the * child process. */ mutex_enter(proc_lock); if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT) p2->p_lflag |= PL_CONTROLT; LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling); p2->p_exitsig = exitsig; /* signal for parent on exit */ /* * We don't want to tracefork vfork()ed processes because they * will not receive the SIGTRAP until it is too late. */ tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) == (PSL_TRACEFORK|PSL_TRACED) && (flags && FORK_PPWAIT) == 0; if (tracefork) { p2->p_slflag |= PSL_TRACED; p2->p_opptr = p2->p_pptr; if (p2->p_pptr != p1->p_pptr) { struct proc *parent1 = p2->p_pptr; if (parent1->p_lock < p2->p_lock) { if (!mutex_tryenter(parent1->p_lock)) { mutex_exit(p2->p_lock); mutex_enter(parent1->p_lock); } } else if (parent1->p_lock > p2->p_lock) { mutex_enter(parent1->p_lock); } parent1->p_slflag |= PSL_CHTRACED; proc_reparent(p2, p1->p_pptr); if (parent1->p_lock != p2->p_lock) mutex_exit(parent1->p_lock); } /* * Set ptrace status. 
*/ p1->p_fpid = p2->p_pid; p2->p_fpid = p1->p_pid; } LIST_INSERT_AFTER(p1, p2, p_pglist); LIST_INSERT_HEAD(&allproc, p2, p_list); p2->p_trace_enabled = trace_is_enabled(p2); #ifdef __HAVE_SYSCALL_INTERN (*p2->p_emul->e_syscall_intern)(p2); #endif /* * Update stats now that we know the fork was successful. */ uvmexp.forks++; if (flags & FORK_PPWAIT) uvmexp.forks_ppwait++; if (flags & FORK_SHAREVM) uvmexp.forks_sharevm++; /* * Pass a pointer to the new process to the caller. */ if (rnewprocp != NULL) *rnewprocp = p2; if (ktrpoint(KTR_EMUL)) p2->p_traceflag |= KTRFAC_TRC_EMUL; /* * Notify any interested parties about the new process. */ if (!SLIST_EMPTY(&p1->p_klist)) { mutex_exit(proc_lock); KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); mutex_enter(proc_lock); } /* * Make child runnable, set start time, and add to run queue except * if the parent requested the child to start in SSTOP state. */ mutex_enter(p2->p_lock); /* * Start profiling. */ if ((p2->p_stflag & PST_PROFIL) != 0) { mutex_spin_enter(&p2->p_stmutex); startprofclock(p2); mutex_spin_exit(&p2->p_stmutex); } getmicrotime(&p2->p_stats->p_start); p2->p_acflag = AFORK; lwp_lock(l2); KASSERT(p2->p_nrlwps == 1); if (p2->p_sflag & PS_STOPFORK) { struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate; p2->p_nrlwps = 0; p2->p_stat = SSTOP; p2->p_waited = 0; p1->p_nstopchild++; l2->l_stat = LSSTOP; KASSERT(l2->l_wchan == NULL); lwp_unlock_to(l2, spc->spc_lwplock); } else { p2->p_nrlwps = 1; p2->p_stat = SACTIVE; l2->l_stat = LSRUN; sched_enqueue(l2, false); lwp_unlock(l2); } /* * Return child pid to parent process, * marking us as parent via retval[1]. */ if (retval != NULL) { retval[0] = p2->p_pid; retval[1] = 0; } mutex_exit(p2->p_lock); /* * Preserve synchronization semantics of vfork. If waiting for * child to exec or exit, sleep until it clears LP_VFORKWAIT. 
*/ #if 0 while (l1->l_pflag & LP_VFORKWAIT) { cv_wait(&l1->l_waitcv, proc_lock); } #else while (p2->p_lflag & PL_PPWAIT) cv_wait(&p1->p_waitcv, proc_lock); #endif /* * Let the parent know that we are tracing its child. */ if (tracefork) { ksiginfo_t ksi; KSI_INIT_EMPTY(&ksi); ksi.ksi_signo = SIGTRAP; ksi.ksi_lid = l1->l_lid; kpsignal(p1, &ksi, NULL); } mutex_exit(proc_lock); return 0; }
* thread.c: thread implementation * Refered to ReactOS code */ #include "mutex.h" #include "unistr.h" #include "attach.h" #include "semaphore.h" #include "thread.h" #include "wineserver/lib.h" #ifdef CONFIG_UNIFIED_KERNEL POBJECT_TYPE thread_object_type = NULL; EXPORT_SYMBOL(thread_object_type); struct list_head thread_list = LIST_INIT(thread_list); static void thread_close(struct ethread *); static int thread_signal(struct ethread *, int); static void thread_exit(struct ethread *, int); static void thread_execve(struct ethread *); static void thread_fork(struct ethread *, struct task_struct *, struct task_struct *, unsigned long); extern void do_exit_task(struct task_struct *tsk, long code); extern long do_fork_from_task(struct task_struct *ptsk, unsigned long process_flags, unsigned long clone_flags, unsigned long stack_start,
int rsys_init(struct rsys* rsys, struct rsys_table* table, struct esys* esys) { if(rsys->magic == OCN_MAGIC) return OCN_FAIL; /* rendersystem table */ rsys->table = *table; if(rsys->table.win_init != NULL) if(rsys->table.win_init() != OCN_OK) return OCN_WINDOW_ERR_CREATE; if(rsys->table.ctt1_init != NULL) { if(rsys->table.ctt1_init() != OCN_OK) { if(rsys->table.win_dest != NULL) rsys->table.win_dest(); return OCN_CONTEXT_ERR_CREATE; } } if(rsys->table.ctt2_init != NULL) { if(rsys->table.ctt2_init() != OCN_OK) { if(rsys->table.ctt1_dest != NULL) rsys->table.ctt1_dest(); if(rsys->table.win_dest != NULL) rsys->table.win_dest(); return OCN_CONTEXT_ERR_CREATE; } } if(rsys->table.glew_init != NULL) { if(rsys->table.glew_init() != OCN_OK) { if(rsys->table.ctt1_dest != NULL) rsys->table.ctt1_dest(); if(rsys->table.ctt2_dest != NULL) rsys->table.ctt2_dest(); if(rsys->table.win_dest != NULL) rsys->table.win_dest(); return OCN_GLEW_ERR; } } /* render queue */ ocn_spin_init(&rsys->render_reqs_lock); queue_init (&rsys->render_reqs, OCN_RSYS_BUFFER_SIZE); /* worker queue */ ocn_spin_init(&rsys->worker_reqs_lock); queue_init (&rsys->worker_reqs, OCN_RSYS_BUFFER_SIZE); ocn_thread_create(&rsys->worker_thread, rsys_worker, rsys); ocn_thread_create(&rsys->render_thread, rsys_render, rsys); /* ro_assets */ ocn_spin_init(&rsys->ro_assets_lock); for(uint64_t u = 0lu; u < OCN_MAX_ASSETS; u++) rsys->ro_assets[u].flags = 0lu; /* ro_entities */ ocn_spin_init(&rsys->ro_entities_lock); for(uint64_t u = 0lu; u < OCN_MAX_ENTITIES; u++) rsys->ro_entities[u].flags = 0lu; LIST_INIT(&rsys->ros) frame_init(&rsys->frame); rsys->bg_red = 0.5f; rsys->bg_green = 0.5f; rsys->bg_blue = 0.5f; rsys->bg_alpha = 1.0f; rsys->magic = OCN_MAGIC; return OCN_OK; }
uint32_t dev_class; uint32_t reg_base[6]; uint32_t reg_size[6]; uint8_t irq_line; uint8_t irq_pin; struct list link; }; struct pci_bus { struct pci_func *parent_bridge; uint32_t busno; }; static struct list pci_func_list = LIST_INIT(pci_func_list); static paddr_t pci_map_base = CONFIG_PCI_MMIO_ALLOC_BASE; static void pci_conf1_set_addr(uint32_t bus, uint32_t dev, uint32_t func, uint32_t offset) { ASSERT(bus < 256); ASSERT(dev < 32); ASSERT(func < 8); ASSERT(offset < 256); ASSERT((offset & 0x3) == 0); uint32_t v = (1 << 31) | /* config-space */
#include "external.h" #include "list.h" #include "realtime.h" #include "rig.h" #include "status.h" #include "track.h" #define RATE 44100 #define SAMPLE (sizeof(signed short) * TRACK_CHANNELS) /* bytes per sample */ #define TRACK_BLOCK_PCM_BYTES (TRACK_BLOCK_SAMPLES * SAMPLE) #define _STR(tok) #tok #define STR(tok) _STR(tok) static struct list tracks = LIST_INIT(tracks); static bool use_mlock = false; /* * An empty track is used rarely, and is easier than * continuous checks for NULL throughout the code */ static struct track empty = { .refcount = 1, .rate = RATE, .bytes = 0, .length = 0, .blocks = 0,
/*
 * Called from boot code to establish ppp interfaces.
 *
 * NOTE(review): the empty parameter list is an unprototyped K&R-style
 * declaration; left unchanged in case the pseudo-device attach table
 * invokes it with a count argument — confirm before tightening to
 * (void).
 */
void pppattach()
{
	LIST_INIT(&ppp_softc_list);	/* no ppp units allocated yet */
	if_clone_attach(&ppp_cloner);	/* create units on demand via the
					 * interface-cloning framework */
}
int main( int argc, char **argv) { extern char *optarg; CLIENT *cl; int ch, ret; char *passwd; prog = argv[0]; version_check(); /* * Check whether another server is running or not. There * is a race condition where two servers could be racing to * register with the portmapper. The goal of this check is to * forbid running additional servers (like those started from * the test suite) if the user is already running one. * * XXX * This does not solve nor prevent two servers from being * started at the same time and running recovery at the same * time on the same environments. */ if ((cl = clnt_create("localhost", DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) { fprintf(stderr, "%s: Berkeley DB RPC server already running.\n", prog); clnt_destroy(cl); return (EXIT_FAILURE); } LIST_INIT(&__dbsrv_home); while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF) switch (ch) { case 'h': (void)add_home(optarg); break; case 'I': if (__db_getlong(NULL, prog, optarg, 1, LONG_MAX, &__dbsrv_idleto)) return (EXIT_FAILURE); break; case 'L': logfile = optarg; break; case 'P': passwd = strdup(optarg); memset(optarg, 0, strlen(optarg)); if (passwd == NULL) { fprintf(stderr, "%s: strdup: %s\n", prog, strerror(errno)); return (EXIT_FAILURE); } if ((ret = add_passwd(passwd)) != 0) { fprintf(stderr, "%s: strdup: %s\n", prog, strerror(ret)); return (EXIT_FAILURE); } break; case 't': if (__db_getlong(NULL, prog, optarg, 1, LONG_MAX, &__dbsrv_defto)) return (EXIT_FAILURE); break; case 'T': if (__db_getlong(NULL, prog, optarg, 1, LONG_MAX, &__dbsrv_maxto)) return (EXIT_FAILURE); break; case 'V': printf("%s\n", db_version(NULL, NULL, NULL)); return (EXIT_SUCCESS); case 'v': __dbsrv_verbose = 1; break; default: usage(prog); } /* * Check default timeout against maximum timeout */ if (__dbsrv_defto > __dbsrv_maxto) __dbsrv_defto = __dbsrv_maxto; /* * Check default timeout against idle timeout * It would be bad to timeout environments sooner than txns. 
*/ if (__dbsrv_defto > __dbsrv_idleto) fprintf(stderr, "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n", prog, __dbsrv_idleto, __dbsrv_defto); LIST_INIT(&__dbsrv_head); /* * If a client crashes during an RPC, our reply to it * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily. */ #ifdef SIGPIPE signal(SIGPIPE, SIG_IGN); #endif if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile)) return (EXIT_FAILURE); /* * Now that we are ready to start, run recovery on all the * environments specified. */ if (env_recover(prog) != 0) return (EXIT_FAILURE); /* * We've done our setup, now call the generated server loop */ if (__dbsrv_verbose) printf("%s: Ready to receive requests\n", prog); __dbsrv_main(); /* NOTREACHED */ abort(); }
/* Perform the most basic initialization of a proxy : * memset(), list_init(*), reset_timeouts(*). * Any new proxy or peer should be initialized via this function. */ void init_new_proxy(struct proxy *p) { memset(p, 0, sizeof(struct proxy)); LIST_INIT(&p->pendconns); LIST_INIT(&p->acl); LIST_INIT(&p->http_req_rules); LIST_INIT(&p->block_cond); LIST_INIT(&p->redirect_rules); LIST_INIT(&p->mon_fail_cond); LIST_INIT(&p->switching_rules); LIST_INIT(&p->server_rules); LIST_INIT(&p->persist_rules); LIST_INIT(&p->sticking_rules); LIST_INIT(&p->storersp_rules); LIST_INIT(&p->tcp_req.inspect_rules); LIST_INIT(&p->tcp_rep.inspect_rules); LIST_INIT(&p->tcp_req.l4_rules); LIST_INIT(&p->req_add); LIST_INIT(&p->rsp_add); LIST_INIT(&p->listener_queue); LIST_INIT(&p->logsrvs); LIST_INIT(&p->logformat); LIST_INIT(&p->format_unique_id); /* Timeouts are defined as -1 */ proxy_reset_timeouts(p); p->tcp_rep.inspect_delay = TICK_ETERNITY; }
/*
 * vmem_init: initialize (or allocate and initialize) a vmem arena.
 *
 * vm         => arena to initialize, or NULL to allocate one here
 * name       => arena name, copied into vm_name
 * base/size  => initial span added to the arena (size 0 => empty arena)
 * quantum    => allocation unit; must be a power of two (asserted below)
 * importfn/releasefn/arg => hooks for importing/releasing spans from a
 *               backing source
 * qcache_max => quantum-cache limit (only used when QCACHE is compiled in)
 * flags      => exactly one of VM_SLEEP or VM_NOSLEEP, plus optional
 *               VM_BOOTSTRAP
 * ipl        => interrupt priority level for the arena lock
 *
 * Returns the initialized arena, or NULL on allocation/span failure.
 * On failure of the initial vmem_add(), the arena is destroyed again.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	/* Exactly one of VM_SLEEP/VM_NOSLEEP must be set. */
	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	/* Caller may pass NULL to have the arena allocated here. */
	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	/* quantum must be a power of two for this mask/shift to be valid. */
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	/* Start with the small embedded hash; grows later as needed. */
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	/* Seed the arena with the initial span, if any. */
	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm);
	}

	/* Publish the arena on the global list. */
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}
WINE_DEFAULT_DEBUG_CHANNEL(explorer); #ifdef HAVE_LIBHAL #include <dbus/dbus.h> #include <hal/libhal.h> struct dos_drive { struct list entry; char *udi; int drive; }; static struct list drives_list = LIST_INIT(drives_list); #define DBUS_FUNCS \ DO_FUNC(dbus_bus_get); \ DO_FUNC(dbus_connection_close); \ DO_FUNC(dbus_connection_read_write_dispatch); \ DO_FUNC(dbus_error_init); \ DO_FUNC(dbus_error_free); \ DO_FUNC(dbus_error_is_set) #define HAL_FUNCS \ DO_FUNC(libhal_ctx_free); \ DO_FUNC(libhal_ctx_init); \ DO_FUNC(libhal_ctx_new); \ DO_FUNC(libhal_ctx_set_dbus_connection); \
/*------------------------------------------------------------------------*
 *	usb_attach_sub
 *
 * This function creates a thread which runs the USB attach code.
 *
 * Each message kind is a two-element array: the USB process framework
 * uses the pair to coalesce repeated signals (a message can be queued
 * while its twin is being processed).
 *------------------------------------------------------------------------*/
static void
usb_attach_sub(device_t dev, struct usb_bus *bus)
{
	/* devclass_find() is not thread-safe early in boot; serialize
	 * the lookup under Giant. */
	mtx_lock(&Giant);
	if (usb_devclass_ptr == NULL)
		usb_devclass_ptr = devclass_find("usbus");
	mtx_unlock(&Giant);

#if USB_HAVE_PF
	usbpf_attach(bus);
#endif
	/* Initialise USB process messages */
	bus->explore_msg[0].hdr.pm_callback = &usb_bus_explore;
	bus->explore_msg[0].bus = bus;
	bus->explore_msg[1].hdr.pm_callback = &usb_bus_explore;
	bus->explore_msg[1].bus = bus;
	bus->detach_msg[0].hdr.pm_callback = &usb_bus_detach;
	bus->detach_msg[0].bus = bus;
	bus->detach_msg[1].hdr.pm_callback = &usb_bus_detach;
	bus->detach_msg[1].bus = bus;
	bus->attach_msg[0].hdr.pm_callback = &usb_bus_attach;
	bus->attach_msg[0].bus = bus;
	bus->attach_msg[1].hdr.pm_callback = &usb_bus_attach;
	bus->attach_msg[1].bus = bus;
	bus->suspend_msg[0].hdr.pm_callback = &usb_bus_suspend;
	bus->suspend_msg[0].bus = bus;
	bus->suspend_msg[1].hdr.pm_callback = &usb_bus_suspend;
	bus->suspend_msg[1].bus = bus;
	bus->resume_msg[0].hdr.pm_callback = &usb_bus_resume;
	bus->resume_msg[0].bus = bus;
	bus->resume_msg[1].hdr.pm_callback = &usb_bus_resume;
	bus->resume_msg[1].bus = bus;
	bus->reset_msg[0].hdr.pm_callback = &usb_bus_reset;
	bus->reset_msg[0].bus = bus;
	bus->reset_msg[1].hdr.pm_callback = &usb_bus_reset;
	bus->reset_msg[1].bus = bus;
	bus->shutdown_msg[0].hdr.pm_callback = &usb_bus_shutdown;
	bus->shutdown_msg[0].bus = bus;
	bus->shutdown_msg[1].hdr.pm_callback = &usb_bus_shutdown;
	bus->shutdown_msg[1].bus = bus;
#if USB_HAVE_UGEN
	LIST_INIT(&bus->pd_cleanup_list);
	bus->cleanup_msg[0].hdr.pm_callback = &usb_bus_cleanup;
	bus->cleanup_msg[0].bus = bus;
	bus->cleanup_msg[1].hdr.pm_callback = &usb_bus_cleanup;
	bus->cleanup_msg[1].bus = bus;
#endif

#if USB_HAVE_PER_BUS_PROCESS
	/* Create USB explore and callback processes */
	/* Each usb_proc_create() returns non-zero on failure; the chain
	 * stops at the first failure and the bus is left without a full
	 * set of processes (only a warning is printed). */
	if (usb_proc_create(USB_BUS_GIANT_PROC(bus),
	    &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) {
		device_printf(dev, "WARNING: Creation of USB Giant "
		    "callback process failed.\n");
	} else if (usb_proc_create(USB_BUS_NON_GIANT_ISOC_PROC(bus),
	    &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_HIGHEST)) {
		device_printf(dev, "WARNING: Creation of USB non-Giant ISOC "
		    "callback process failed.\n");
	} else if (usb_proc_create(USB_BUS_NON_GIANT_BULK_PROC(bus),
	    &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_HIGH)) {
		device_printf(dev, "WARNING: Creation of USB non-Giant BULK "
		    "callback process failed.\n");
	} else if (usb_proc_create(USB_BUS_EXPLORE_PROC(bus),
	    &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) {
		device_printf(dev, "WARNING: Creation of USB explore "
		    "process failed.\n");
	} else if (usb_proc_create(USB_BUS_CONTROL_XFER_PROC(bus),
	    &bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) {
		device_printf(dev, "WARNING: Creation of USB control transfer "
		    "process failed.\n");
	} else
#endif
	{
		/* Get final attach going */
		USB_BUS_LOCK(bus);
		usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus),
		    &bus->attach_msg[0], &bus->attach_msg[1]);
		USB_BUS_UNLOCK(bus);

		/* Do initial explore */
		usb_needs_explore(bus, 1);
	}
}
/*
 * Decide which gid a new user account should get.
 *
 * Resolution order:
 *   1. an explicit -g argument (group name or numeric gid), falling back
 *      to the configured default group;
 *   2. an existing group with the same name as the user;
 *   3. otherwise auto-create a group named after the user, preferring
 *      gid == prefer so uid and gid stay in sync.
 *
 * Exits via errx() when an explicitly requested group does not exist.
 * Returns the chosen gid.
 */
static uid_t
pw_gidpolicy(struct cargs * args, char *nam, gid_t prefer)
{
	struct group *grp;
	/* BUG FIX: sentinel was cast to uid_t; gid is a gid_t. */
	gid_t gid = (gid_t) - 1;
	struct carg *a_gid = getarg(args, 'g');
	struct userconf *cnf = conf.userconf;

	/*
	 * If no arg given, see if default can help out
	 */
	if (a_gid == NULL && cnf->default_group && *cnf->default_group)
		a_gid = addarg(args, 'g', cnf->default_group);

	/*
	 * Check the given gid, if any
	 */
	SETGRENT();
	if (a_gid != NULL) {
		/* Try as a group name first, then as a numeric gid. */
		if ((grp = GETGRNAM(a_gid->val)) == NULL) {
			gid = (gid_t) atol(a_gid->val);
			if ((gid == 0 && !isdigit((unsigned char)*a_gid->val)) ||
			    (grp = GETGRGID(gid)) == NULL)
				errx(EX_NOUSER, "group `%s' is not defined",
				    a_gid->val);
		}
		gid = grp->gr_gid;
	} else if ((grp = GETGRNAM(nam)) != NULL &&
	    (grp->gr_mem == NULL || grp->gr_mem[0] == NULL)) {
		gid = grp->gr_gid;	/* Already created? Use it anyway... */
	} else {
		struct cargs grpargs;
		char tmp[32];

		LIST_INIT(&grpargs);

		/*
		 * We need to auto-create a group with the user's name. We
		 * can send all the appropriate output to our sister routine
		 * bit first see if we can create a group with gid==uid so we
		 * can keep the user and group ids in sync. We purposely do
		 * NOT check the gid range if we can force the sync. If the
		 * user's name dups an existing group, then the group add
		 * function will happily handle that case for us and exit.
		 */
		if (GETGRGID(prefer) == NULL) {
			snprintf(tmp, sizeof(tmp), "%u", prefer);
			addarg(&grpargs, 'g', tmp);
		}
		if (conf.dryrun) {
			addarg(&grpargs, 'q', NULL);
			gid = pw_group(M_NEXT, nam, -1, &grpargs);
		} else {
			pw_group(M_ADD, nam, -1, &grpargs);
			if ((grp = GETGRNAM(nam)) != NULL)
				gid = grp->gr_gid;
		}
		/* Unlink the temporary argument list again. */
		a_gid = LIST_FIRST(&grpargs);
		while (a_gid != NULL) {
			struct carg *t = LIST_NEXT(a_gid, list);
			LIST_REMOVE(a_gid, list);
			a_gid = t;
		}
	}
	ENDGRENT();
	return gid;
}
0, 0, &uuid_cs, { &critsect_debug.ProcessLocksList, &critsect_debug.ProcessLocksList }, 0, 0, { (DWORD_PTR)(__FILE__ ": uuid_cs") } }; static CRITICAL_SECTION uuid_cs = { &critsect_debug, -1, 0, 0, 0, 0 }; static CRITICAL_SECTION threaddata_cs; static CRITICAL_SECTION_DEBUG threaddata_cs_debug = { 0, 0, &threaddata_cs, { &threaddata_cs_debug.ProcessLocksList, &threaddata_cs_debug.ProcessLocksList }, 0, 0, { (DWORD_PTR)(__FILE__ ": threaddata_cs") } }; static CRITICAL_SECTION threaddata_cs = { &threaddata_cs_debug, -1, 0, 0, 0, 0 }; static struct list threaddata_list = LIST_INIT(threaddata_list); struct context_handle_list { struct context_handle_list *next; NDR_SCONTEXT context_handle; }; struct threaddata { struct list entry; CRITICAL_SECTION cs; DWORD thread_id; RpcConnection *connection; RpcBinding *server_binding; struct context_handle_list *context_handle_list;
/* Validate a proposal inside SA according to EXCHANGE's policy.
 *
 * For each "Transforms" section of the policy, every protocol of the SA
 * is checked: attribute_map() walks the peer's chosen transform payload
 * and attribute_unacceptable() flags any attribute that conflicts with
 * the policy, while vs.attrs collects policy attributes not yet seen.
 * A transform section matches only if, after removing the tags the
 * section explicitly lists, no required attribute is left over.
 *
 * Returns 1 if some transform section accepts every protocol, else 0.
 *
 * NOTE(review): if the "Transforms" list exists but is empty, the outer
 * loop never runs and the success path below reads vs.life (and earlier
 * vs.attrs) uninitialized — confirm conf_get_list() never returns an
 * empty list here.
 */
static int
ike_phase_1_validate_prop (struct exchange *exchange, struct sa *sa,
                           struct sa *isakmp_sa)
{
  struct conf_list *conf, *tags;
  struct conf_list_node *xf, *tag;
  struct proto *proto;
  struct validation_state vs;
  struct attr_node *node, *next_node;

  /* Get the list of transforms.  */
  conf = conf_get_list (exchange->policy, "Transforms");
  if (!conf)
    return 0;

  for (xf = TAILQ_FIRST (&conf->fields); xf; xf = TAILQ_NEXT (xf, link))
    {
      for (proto = TAILQ_FIRST (&sa->protos); proto;
           proto = TAILQ_NEXT (proto, link))
        {
          /* Mark all attributes in our policy as unseen.  */
          LIST_INIT (&vs.attrs);
          vs.xf = xf;
          vs.life = 0;

          /* Walk the peer's attributes; any unacceptable one fails
             this transform section.  */
          if (attribute_map (proto->chosen->p + ISAKMP_TRANSFORM_SA_ATTRS_OFF,
                             GET_ISAKMP_GEN_LENGTH (proto->chosen->p)
                             - ISAKMP_TRANSFORM_SA_ATTRS_OFF,
                             attribute_unacceptable, &vs))
            goto try_next;

          /* Sweep over unseen tags in this section.  */
          tags = conf_get_tag_list (xf->field);
          if (tags)
            {
              for (tag = TAILQ_FIRST (&tags->fields); tag;
                   tag = TAILQ_NEXT (tag, link))
                /*
                 * XXX Should we care about attributes we have, they do not
                 * provide?
                 */
                for (node = LIST_FIRST (&vs.attrs); node; node = next_node)
                  {
                    next_node = LIST_NEXT (node, link);
                    if (node->type == constant_value (ike_attr_cst,
                                                      tag->field))
                      {
                        LIST_REMOVE (node, link);
                        free (node);
                      }
                  }
              conf_free_list (tags);
            }

          /* Are there leftover tags in this section?  */
          node = LIST_FIRST (&vs.attrs);
          if (node)
            goto try_next;
        }

      /* All protocols were OK, we succeeded.  */
      LOG_DBG ((LOG_NEGOTIATION, 20, "ike_phase_1_validate_prop: success"));
      conf_free_list (conf);
      if (vs.life)
        free (vs.life);
      return 1;

    try_next:
      /* This section did not match; drain any remaining attribute
         nodes before trying the next transform section.  */
      node = LIST_FIRST (&vs.attrs);
      while (node)
        {
          LIST_REMOVE (node, link);
          free (node);
          node = LIST_FIRST (&vs.attrs);
        }
      if (vs.life)
        free (vs.life);
    }

  LOG_DBG ((LOG_NEGOTIATION, 20, "ike_phase_1_validate_prop: failure"));
  conf_free_list (conf);
  return 0;
}
static int dirty_rect_counter = 0; struct surface { SDL_Surface *surf; }; typedef struct { list_elm_t elm; const sprite_t *sprite; const sprite_t *mask; int color_off; surface_t *surface; } cached_surface_t; static list_t transp_sprite_cache = LIST_INIT(transp_sprite_cache); static list_t overlay_sprite_cache = LIST_INIT(overlay_sprite_cache); static list_t masked_sprite_cache = LIST_INIT(masked_sprite_cache); int sdl_init() { /* Initialize defaults and Video subsystem */ if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO) < 0) { LOGE("Unable to initialize SDL: %s.", SDL_GetError()); return -1; } /* Display program name and version in caption */ char caption[64];
int main(int argc, char **argv) { extern char *__progname; char *cfgfile = 0; int ch; if (geteuid() != 0) { /* No point in continuing. */ fprintf(stderr, "%s: This daemon needs to be run as root.\n", __progname); return 1; } while ((ch = getopt(argc, argv, "c:dv")) != -1) { switch (ch) { case 'c': if (cfgfile) usage(); cfgfile = optarg; break; case 'd': cfgstate.debug++; break; case 'v': cfgstate.verboselevel++; break; default: usage(); } } argc -= optind; argv += optind; if (argc > 0) usage(); log_init(__progname); timer_init(); cfgstate.runstate = INIT; LIST_INIT(&cfgstate.peerlist); cfgstate.listen_port = SASYNCD_DEFAULT_PORT; cfgstate.flags |= CTL_DEFAULT; if (!cfgfile) cfgfile = SASYNCD_CFGFILE; if (conf_parse_file(cfgfile) == 0 ) { if (!cfgstate.sharedkey) { fprintf(stderr, "config: " "no shared key specified, cannot continue"); exit(1); } } else { exit(1); } carp_demote(CARP_INC, 0); if (carp_init()) return 1; if (pfkey_init(0)) return 1; if (net_init()) return 1; if (!cfgstate.debug) if (daemon(1, 0)) { perror("daemon()"); exit(1); } if (monitor_init()) { /* Parent, with privileges. */ monitor_loop(); exit(0); } /* Child, no privileges left. Run main loop. */ sasyncd_run(getppid()); /* Shutdown. */ log_msg(0, "shutting down..."); net_shutdown(); pfkey_shutdown(); return 0; }
#include "config.h" #include "wine/port.h" #define DDRAW_INIT_GUID #include "ddraw_private.h" #include "rpcproxy.h" #include "wine/exception.h" #include "winreg.h" WINE_DEFAULT_DEBUG_CHANNEL(ddraw); /* The configured default surface */ enum ddraw_surface_type DefaultSurfaceType = DDRAW_SURFACE_TYPE_OPENGL; static struct list global_ddraw_list = LIST_INIT(global_ddraw_list); static HINSTANCE instance; /* value of ForceRefreshRate */ DWORD force_refresh_rate = 0; /* Structure for converting DirectDrawEnumerateA to DirectDrawEnumerateExA */ struct callback_info { LPDDENUMCALLBACKA callback; void *context; }; /* Enumeration callback for converting DirectDrawEnumerateA to DirectDrawEnumerateExA */ static HRESULT CALLBACK enum_callback(GUID *guid, char *description, char *driver_name,
#include "rpc.h" #include "rpcndr.h" #include "unknwn.h" #include "oleidl.h" #include "shobjidl.h" #include "initguid.h" #include "ksmedia.h" #include "propkey.h" #include "devpkey.h" #include "dsound_private.h" WINE_DEFAULT_DEBUG_CHANNEL(dsound); struct list DSOUND_renderers = LIST_INIT(DSOUND_renderers); CRITICAL_SECTION DSOUND_renderers_lock; static CRITICAL_SECTION_DEBUG DSOUND_renderers_lock_debug = { 0, 0, &DSOUND_renderers_lock, { &DSOUND_renderers_lock_debug.ProcessLocksList, &DSOUND_renderers_lock_debug.ProcessLocksList }, 0, 0, { (DWORD_PTR)(__FILE__ ": DSOUND_renderers_lock") } }; CRITICAL_SECTION DSOUND_renderers_lock = { &DSOUND_renderers_lock_debug, -1, 0, 0, 0, 0 }; struct list DSOUND_capturers = LIST_INIT(DSOUND_capturers); CRITICAL_SECTION DSOUND_capturers_lock; static CRITICAL_SECTION_DEBUG DSOUND_capturers_lock_debug = { 0, 0, &DSOUND_capturers_lock, { &DSOUND_capturers_lock_debug.ProcessLocksList, &DSOUND_capturers_lock_debug.ProcessLocksList },
/* ARGSUSED */
void
gifattach(int count)
{
	/* Boot-time attach hook for gif(4): empty softc list plus the
	 * cloner registration; `count' is unused. */
	LIST_INIT(&gif_softc_list);
	if_clone_attach(&gif_cloner);
}
/* * Initialize a given port using default settings and with the RX buffers * coming from the mbuf_pool passed as a parameter. * FIXME: Starting with assumption of one thread/core per port */ static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port, struct rte_mempool *rx_mbuf_pool, unsigned int mtu) { int retval; /* Check for a valid port */ if (port->id >= rte_eth_dev_count()) return -ENODEV; /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */ /* FIXME: Check if hw_ip_checksum is possible */ struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = mtu, .jumbo_frame = 1, .hw_ip_checksum = 1, } }; retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf); if (retval != 0) return retval; retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE, rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool); if (retval < 0) return retval; retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE, rte_eth_dev_socket_id(port->id), NULL); if (retval < 0) goto port_init_fail; /* Create the hash table for the RX sockets */ char name[32]; snprintf(name, sizeof(name), "rx_table_%u", port->id); struct rte_hash_parameters hash_params = { .name = name, .entries = UHD_DPDK_MAX_SOCKET_CNT, .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple), .hash_func = NULL, .hash_func_init_val = 0, }; port->rx_table = rte_hash_create(&hash_params); if (port->rx_table == NULL) { retval = rte_errno; goto port_init_fail; } /* Create ARP table */ snprintf(name, sizeof(name), "arp_table_%u", port->id); hash_params.name = name; hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT; hash_params.key_len = sizeof(uint32_t); hash_params.hash_func = NULL; hash_params.hash_func_init_val = 0; port->arp_table = rte_hash_create(&hash_params); if (port->arp_table == NULL) { retval = rte_errno; goto free_rx_table; } /* Set up list for TX queues */ LIST_INIT(&port->txq_list); /* Start the Ethernet port. 
*/ retval = rte_eth_dev_start(port->id); if (retval < 0) { goto free_arp_table; } /* Display the port MAC address. */ rte_eth_macaddr_get(port->id, &port->mac_addr); RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n", (unsigned)port->id, port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1], port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3], port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]); struct rte_eth_link link; rte_eth_link_get(port->id, &link); RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status); return 0; free_arp_table: rte_hash_free(port->arp_table); free_rx_table: rte_hash_free(port->rx_table); port_init_fail: return rte_errno; } static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id) { if (!ctx || !thread) return -EINVAL; unsigned int socket_id = rte_lcore_to_socket_id(id); thread->id = id; thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id]; thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id]; LIST_INIT(&thread->port_list); char name[32]; snprintf(name, sizeof(name), "sockreq_ring_%u", id); thread->sock_req_ring = rte_ring_create( name, UHD_DPDK_MAX_PENDING_SOCK_REQS, socket_id, RING_F_SC_DEQ ); if (!thread->sock_req_ring) return -ENOMEM; return 0; } int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports, int *port_thread_mapping, int num_mbufs, int mbuf_cache_size, int mtu) { /* Init context only once */ if (ctx) return 1; if ((num_ports == 0) || (port_thread_mapping == NULL)) { return -EINVAL; } /* Grabs arguments intended for DPDK's EAL */ int ret = rte_eal_init(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id()); if (!ctx) return -ENOMEM; ctx->num_threads = rte_lcore_count(); if (ctx->num_threads <= 1) rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n"); /* Check that we have ports to send/receive on */ ctx->num_ports = 
rte_eth_dev_count(); if (ctx->num_ports < 1) rte_exit(EXIT_FAILURE, "Error: Found no ports\n"); if (ctx->num_ports < num_ports) rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n"); /* Get memory for thread and port data structures */ ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0); if (!ctx->threads) rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n"); ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0); if (!ctx->ports) rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n"); /* Initialize the thread data structures */ for (int i = rte_get_next_lcore(-1, 1, 0); (i < RTE_MAX_LCORE); i = rte_get_next_lcore(i, 1, 0)) { /* Do one mempool of RX/TX per socket */ unsigned int socket_id = rte_lcore_to_socket_id(i); /* FIXME Probably want to take into account actual number of ports per socket */ if (ctx->tx_pktbuf_pools[socket_id] == NULL) { /* Creates a new mempool in memory to hold the mbufs. 
* This is done for each CPU socket */ const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM; char name[32]; snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id); ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create( name, ctx->num_ports*num_mbufs, mbuf_cache_size, 0, mbuf_size, socket_id ); snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id); ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create( name, ctx->num_ports*num_mbufs, mbuf_cache_size, 0, mbuf_size, socket_id ); if ((ctx->rx_pktbuf_pools[socket_id]== NULL) || (ctx->tx_pktbuf_pools[socket_id]== NULL)) rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); } if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0) rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i); } unsigned master_lcore = rte_get_master_lcore(); /* Assign ports to threads and initialize the port data structures */ for (unsigned int i = 0; i < num_ports; i++) { int thread_id = port_thread_mapping[i]; if (thread_id < 0) continue; if (((unsigned int) thread_id) == master_lcore) RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i); if (ctx->threads[thread_id].id != (unsigned int) thread_id) rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i); struct uhd_dpdk_port *port = &ctx->ports[i]; port->id = i; port->parent = &ctx->threads[thread_id]; ctx->threads[thread_id].num_ports++; LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry); /* Initialize port. 
*/ if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0) rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n", i); } RTE_LOG(INFO, EAL, "Init DONE!\n"); /* FIXME: Create functions to do this */ RTE_LOG(INFO, EAL, "Starting I/O threads!\n"); for (int i = rte_get_next_lcore(-1, 1, 0); (i < RTE_MAX_LCORE); i = rte_get_next_lcore(i, 1, 0)) { struct uhd_dpdk_thread *t = &ctx->threads[i]; if (!LIST_EMPTY(&t->port_list)) { rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id); } } return 0; } /* FIXME: This will be changed once we have functions to handle the threads */ int uhd_dpdk_destroy(void) { if (!ctx) return -ENODEV; struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0); if (!req) return -ENOMEM; req->req_type = UHD_DPDK_LCORE_TERM; for (int i = rte_get_next_lcore(-1, 1, 0); (i < RTE_MAX_LCORE); i = rte_get_next_lcore(i, 1, 0)) { struct uhd_dpdk_thread *t = &ctx->threads[i]; if (LIST_EMPTY(&t->port_list)) continue; if (rte_eal_get_lcore_state(t->id) == FINISHED) continue; pthread_mutex_init(&req->mutex, NULL); pthread_cond_init(&req->cond, NULL); pthread_mutex_lock(&req->mutex); if (rte_ring_enqueue(t->sock_req_ring, req)) { pthread_mutex_unlock(&req->mutex); RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i); rte_free(req); return -ENOSPC; } struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 }; pthread_cond_timedwait(&req->cond, &req->mutex, &timeout); pthread_mutex_unlock(&req->mutex); } rte_free(req); return 0; }
RC_PIDLIST * rc_find_pids(const char *exec, const char *const *argv, uid_t uid, pid_t pid) { DIR *procdir; struct dirent *entry; FILE *fp; bool container_pid = false; bool openvz_host = false; char *line = NULL; size_t len = 0; pid_t p; char buffer[PATH_MAX]; struct stat sb; pid_t runscript_pid = 0; char *pp; RC_PIDLIST *pids = NULL; RC_PID *pi; if ((procdir = opendir("/proc")) == NULL) return NULL; /* We never match RC_RUNSCRIPT_PID if present so we avoid the below scenario /etc/init.d/ntpd stop does start-stop-daemon --stop --name ntpd catching /etc/init.d/ntpd stop nasty */ if ((pp = getenv("RC_RUNSCRIPT_PID"))) { if (sscanf(pp, "%d", &runscript_pid) != 1) runscript_pid = 0; } /* If /proc/self/status contains EnvID: 0, then we are an OpenVZ host, and we will need to filter out processes that are inside containers from our list of pids. */ if (exists("/proc/self/status")) { fp = fopen("/proc/self/status", "r"); if (fp) { while (! feof(fp)) { rc_getline(&line, &len, fp); if (strncmp(line, "envID:\t0", 8) == 0) { openvz_host = true; break; } } fclose(fp); } } while ((entry = readdir(procdir)) != NULL) { if (sscanf(entry->d_name, "%d", &p) != 1) continue; if (runscript_pid != 0 && runscript_pid == p) continue; if (pid != 0 && pid != p) continue; if (uid) { snprintf(buffer, sizeof(buffer), "/proc/%d", p); if (stat(buffer, &sb) != 0 || sb.st_uid != uid) continue; } if (exec && !pid_is_exec(p, exec)) continue; if (argv && !pid_is_argv(p, (const char *const *)argv)) continue; /* If this is an OpenVZ host, filter out container processes */ if (openvz_host) { snprintf(buffer, sizeof(buffer), "/proc/%d/status", p); if (exists(buffer)) { fp = fopen(buffer, "r"); if (! fp) continue; while (! feof(fp)) { rc_getline(&line, &len, fp); if (strncmp(line, "envID:", 6) == 0) { container_pid = ! 
(strncmp(line, "envID:\t0", 8) == 0); break; } } fclose(fp); } } if (container_pid) continue; if (!pids) { pids = xmalloc(sizeof(*pids)); LIST_INIT(pids); } pi = xmalloc(sizeof(*pi)); pi->pid = p; LIST_INSERT_HEAD(pids, pi, entries); } if (line != NULL) free(line); closedir(procdir); return pids; }