void mpegts_mux_bouquet_rescan ( const char *src, const char *extra ) { #if ENABLE_MPEGTS_DVB mpegts_network_t *mn; mpegts_mux_t *mm; ssize_t l; const idclass_t *ic; uint32_t freq; int satpos; #endif if (!src) return; #if ENABLE_MPEGTS_DVB if ((l = startswith(src, "dvb-bouquet://dvbs,")) > 0) { uint32_t tsid, nbid; src += l; if ((satpos = dvb_sat_position_from_str(src)) == INT_MAX) return; while (*src && *src != ',') src++; if (sscanf(src, ",%x,%x", &tsid, &nbid) != 2) return; LIST_FOREACH(mn, &mpegts_network_all, mn_global_link) LIST_FOREACH(mm, &mn->mn_muxes, mm_network_link) if (idnode_is_instance(&mm->mm_id, &dvb_mux_dvbs_class) && mm->mm_tsid == tsid && ((dvb_mux_t *)mm)->lm_tuning.u.dmc_fe_qpsk.orbital_pos == satpos) mpegts_mux_scan_state_set(mm, MM_SCAN_STATE_PEND); return; }
/*
 * Count the stable pointers currently registered: walk the global
 * root_StablePtrs list and return the number of entries.
 */
int
num_stableptrs(void)
{
	struct StablePtr *p;
	int n = 0;

	LIST_FOREACH(p, &root_StablePtrs, link)
		n++;

	return n;
}
/*
 * Called by UFS when an inode is no longer active and should have its
 * attributes stripped.
 */
void
ufs_extattr_vnode_inactive(struct vnode *vp, struct thread *td)
{
	struct ufs_extattr_list_entry *uele;
	struct mount *mp = vp->v_mount;
	struct ufsmount *ump = VFSTOUFS(mp);

	/*
	 * In that case, we cannot lock. We should not have any active vnodes
	 * on the fs if this is not yet initialized but is going to be, so
	 * this can go unlocked.
	 */
	if (!(ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_INITIALIZED))
		return;

	ufs_extattr_uepm_lock(ump);

	/* Nothing to strip unless the extattr machinery has been started. */
	if (!(ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)) {
		ufs_extattr_uepm_unlock(ump);
		return;
	}

	/* Remove every configured attribute from this vnode. */
	LIST_FOREACH(uele, &ump->um_extattr.uepm_list, uele_entries)
		ufs_extattr_rm(vp, uele->uele_attrnamespace,
		    uele->uele_attrname, NULL, td);

	ufs_extattr_uepm_unlock(ump);
}
static int manager_network_read_link_servers(Manager *m) { _cleanup_strv_free_ char **ntp = NULL; ServerName *n, *nx; char **i; int r; assert(m); r = sd_network_get_ntp(&ntp); if (r < 0) goto clear; LIST_FOREACH(names, n, m->link_servers) n->marked = true; STRV_FOREACH(i, ntp) { bool found = false; LIST_FOREACH(names, n, m->link_servers) if (streq(n->string, *i)) { n->marked = false; found = true; break; } if (!found) { r = server_name_new(m, NULL, SERVER_LINK, *i); if (r < 0) goto clear; } }
/*
 * Parse one pyepg <schedule> element and merge its <broadcast> children
 * into the EPG for every channel linked to the grabber channel named in
 * the "channel" attribute.  Returns non-zero if anything was saved.
 */
static int _pyepg_parse_schedule
  ( epggrab_module_t *mod, htsmsg_t *data, epggrab_stats_t *stats )
{
  int save = 0;
  htsmsg_t *attr, *tags;
  htsmsg_field_t *f;
  epggrab_channel_t *ec;
  const char *str;
  epggrab_channel_link_t *ecl;

  if ( data == NULL ) return 0;

  /* Bail out quietly on structurally incomplete messages. */
  if ((attr = htsmsg_get_map(data, "attrib")) == NULL) return 0;
  if ((str = htsmsg_get_str(attr, "channel")) == NULL) return 0;
  /* Look up only; do not create the channel (create flag = 0). */
  if ((ec = _pyepg_channel_find(str, 0, NULL)) == NULL) return 0;
  if ((tags = htsmsg_get_map(data, "tags")) == NULL) return 0;

  /* Process each <broadcast> tag for every linked channel. */
  HTSMSG_FOREACH(f, tags) {
    if (strcmp(f->hmf_name, "broadcast") == 0) {
      LIST_FOREACH(ecl, &ec->channels, ecl_epg_link)
        save |= _pyepg_parse_broadcast(mod, htsmsg_get_map_by_field(f),
                                       ecl->ecl_channel, stats);
    }
  }

  return save;
}
/*
 * DDB "show lock" handler for rmlocks: print the CPUs that hold the
 * lock for writing, every per-CPU and active read tracker attached to
 * this lock, and finally the backing write lock.
 */
static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	/* Walk each CPU's reader queue, printing trackers for this lock. */
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
/**
 * Parse a <programme> tag from xmltv and merge it into the EPG for each
 * channel mapped to the xmltv channel id.  Returns non-zero if anything
 * was saved/changed, 0 otherwise.
 */
static int _xmltv_parse_programme
  (epggrab_module_t *mod, htsmsg_t *body, epggrab_stats_t *stats)
{
  int save = 0;
  htsmsg_t *attribs, *tags;
  const char *s, *chid;
  time_t start, stop;
  epggrab_channel_t *ch;
  epggrab_channel_link_t *ecl;

  if(body == NULL) return 0;

  /* Structural checks: required attrib/tags maps and channel id. */
  if((attribs = htsmsg_get_map(body, "attrib")) == NULL) return 0;
  if((tags = htsmsg_get_map(body, "tags")) == NULL) return 0;
  if((chid = htsmsg_get_str(attribs, "channel")) == NULL) return 0;
  /* Look up only; do not create the channel (create flag = 0). */
  if((ch = _xmltv_channel_find(chid, 0, NULL)) == NULL) return 0;
  /* Nothing to do if this grabber channel is mapped to no channels. */
  if (!LIST_FIRST(&ch->channels)) return 0;
  if((s = htsmsg_get_str(attribs, "start")) == NULL) return 0;
  start = _xmltv_str2time(s);
  if((s = htsmsg_get_str(attribs, "stop")) == NULL) return 0;
  stop = _xmltv_str2time(s);

  /* Skip zero/negative-length programmes and ones already finished. */
  if(stop <= start || stop <= dispatch_clock) return 0;

  LIST_FOREACH(ecl, &ch->channels, link)
    save |= _xmltv_parse_programme_tags(mod, ecl->channel, tags,
                                        start, stop, stats);

  return save;
}
/*
 * Assert that the global extent "ex" has the expected name, range and
 * flags, and that its region list renders exactly as "exp"; fail the
 * test case with a dump of both maps otherwise.
 */
static void
h_require(const char *name, u_long start, u_long end, int flags,
    const char *exp)
{
	char buf[4096];
	struct extent_region *rp;
	size_t n = 0;

	ATF_REQUIRE_STREQ_MSG(ex->ex_name, name,
	    "expected: \"%s\", got: \"%s\"", name, ex->ex_name);
	ATF_REQUIRE_EQ_MSG(ex->ex_start, start,
	    "expected: %#lx, got: %#lx", start, ex->ex_start);
	ATF_REQUIRE_EQ_MSG(ex->ex_end, end,
	    "expected: %#lx, got: %#lx", end, ex->ex_end);
	ATF_REQUIRE_EQ_MSG(ex->ex_flags, flags,
	    "expected: %#x, got: %#x", flags, ex->ex_flags);

	/*
	 * Render the region list into buf.  snprintf() returns the length
	 * it *would* have written, so the old unconditional "n +=" could
	 * push n past sizeof(buf), underflowing "sizeof(buf) - n" and
	 * writing out of bounds on the next iteration.  Clamp instead; a
	 * truncated rendering simply fails the comparison below.
	 */
	(void)memset(buf, 0, sizeof(buf));
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		int w;

		if (n >= sizeof(buf) - 1)
			break;			/* buffer full */
		w = snprintf(buf + n, sizeof(buf) - n, "0x%lx - 0x%lx\n",
		    rp->er_start, rp->er_end);
		if (w < 0)
			break;			/* encoding error */
		n += (size_t)w;
		if (n > sizeof(buf) - 1)
			n = sizeof(buf) - 1;	/* output was truncated */
	}
	if (strcmp(buf, exp) == 0)
		return;
	printf("Incorrect extent map\n");
	printf("Expected:\n%s\n", exp);
	printf("Got:\n%s\n", buf);
	atf_tc_fail("incorrect extent map");
}
static void mainloop(void) { gtimer_t *gti; gti_callback_t *cb; struct timespec ts; while(tvheadend_running) { clock_gettime(CLOCK_REALTIME, &ts); /* 1sec stuff */ if (ts.tv_sec > dispatch_clock) { dispatch_clock = ts.tv_sec; comet_flush(); /* Flush idle comet mailboxes */ } /* Global timers */ pthread_mutex_lock(&global_lock); // TODO: there is a risk that if timers re-insert themselves to // the top of the list with a 0 offset we could loop indefinitely #if 0 tvhdebug("gtimer", "now %ld.%09ld", ts.tv_sec, ts.tv_nsec); LIST_FOREACH(gti, >imers, gti_link) tvhdebug("gtimer", " gti %p expire %ld.%08ld", gti, gti->gti_expire.tv_sec, gti->gti_expire.tv_nsec); #endif while((gti = LIST_FIRST(>imers)) != NULL) { if ((gti->gti_expire.tv_sec > ts.tv_sec) || ((gti->gti_expire.tv_sec == ts.tv_sec) && (gti->gti_expire.tv_nsec > ts.tv_nsec))) { ts = gti->gti_expire; break; } cb = gti->gti_callback; //tvhdebug("gtimer", "%p callback", gti); LIST_REMOVE(gti, gti_link); gti->gti_callback = NULL; cb(gti->gti_opaque); } /* Bound wait */ if ((LIST_FIRST(>imers) == NULL) || (ts.tv_sec > (dispatch_clock + 1))) { ts.tv_sec = dispatch_clock + 1; ts.tv_nsec = 0; } /* Wait */ //tvhdebug("gtimer", "wait till %ld.%09ld", ts.tv_sec, ts.tv_nsec); pthread_cond_timedwait(>imer_cond, &global_lock, &ts); pthread_mutex_unlock(&global_lock); } }
void media_global_hold(int on, int flag) { int i; int count; media_pipe_t *mp; hts_mutex_lock(&media_mutex); count = num_media_pipelines; media_pipe_t **mpv = alloca(count * sizeof(media_pipe_t *)); i = 0; LIST_FOREACH(mp, &media_pipelines, mp_global_link) mpv[i++] = mp_retain(mp); hts_mutex_unlock(&media_mutex); for(i = 0; i < count; i++) { mp = mpv[i]; if(!(mp->mp_flags & MP_VIDEO)) continue; if(on) mp_hold(mp, flag, NULL); else mp_unhold(mp, flag); mp_release(mp); } }
/*
 * Block until every subsystem that registered a root-mount hold token
 * has released it, periodically logging which holders we are still
 * waiting for.
 */
static void
vfs_mountroot_wait(void)
{
	struct root_hold_token *h;
	struct timeval lastfail;
	int curfail;

	curfail = 0;
	while (1) {
		/* Let GEOM finish any pending events before checking. */
		DROP_GIANT();
		g_waitidle();
		PICKUP_GIANT();
		mtx_lock(&mountlist_mtx);
		if (LIST_EMPTY(&root_holds)) {
			mtx_unlock(&mountlist_mtx);
			break;
		}
		/* Rate-limited (1/sec) report of outstanding holds. */
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("Root mount waiting for:");
			LIST_FOREACH(h, &root_holds, list)
				printf(" %s", h->who);
			printf("\n");
		}
		/* Sleep until a hold is released; PDROP drops the mutex. */
		msleep(&root_holds, &mountlist_mtx, PZERO | PDROP,
		    "roothold", hz);
	}
}
/*
 * Async event callback for the pt (processor target) driver: attach a
 * peripheral to newly discovered SCSI processor devices, and flag all
 * pending CCBs for unit-attention retry after a bus reset or BDR.
 */
static void
ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only SCSI processor-type devices are of interest. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_PROCESSOR)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor,
					  ptstart, "pt", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, ptasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("ptasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct pt_softc *softc;
		struct ccb_hdr *ccbh;

		softc = (struct pt_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= PT_FLAG_RETRY_UA;
		/* Mark every in-flight CCB so its UA is retried. */
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= PT_CCB_RETRY_UA;
	}
	/* FALLTHROUGH */
	default:
		/* Propagate the event to the generic periph handler. */
		cam_periph_async(periph, code, path, arg);
		break;
	}
}
/*
 * Reap console clients that have disconnected: free each queued client
 * and then empty the exit list.
 */
void console_process_exits()
{
	bedrock_node *node;

	/* NOTE: bedrock's LIST_FOREACH takes (list, iterator-node),
	 * unlike BSD queue.h. */
	LIST_FOREACH(&exiting_client_list, node)
		console_free(node->data);

	bedrock_list_clear(&exiting_client_list);
}
/*
 * Tear down the SSL wrapper: drop every port-redirect rule that was
 * installed for the listening ports.
 */
static void ssl_wrap_fini(void)
{
   struct listen_entry *entry;

   /* remove every redirect rule */
   LIST_FOREACH(entry, &listen_ports, next)
      sslw_remove_redirect(entry->sslw_port, entry->redir_port);
}
/*
 * Grid handler for DVR entries: add every DVR entry that passes the
 * caller's filter to the idnode result set, using the requesting
 * user's language.
 */
static void
api_dvr_entry_grid
  ( access_t *perm, idnode_set_t *ins, api_idnode_grid_conf_t *conf,
    htsmsg_t *args )
{
  dvr_entry_t *entry;

  LIST_FOREACH(entry, &dvrentries, de_global_link)
    idnode_set_add(ins, (idnode_t*)entry, &conf->filter, perm->aa_lang);
}
static void mainloop(void) { gtimer_t *gti; gti_callback_t *cb; time_t now; struct timespec ts; const char *id; while (tvheadend_is_running()) { now = gdispatch_clock_update(); ts.tv_sec = now + 3600; ts.tv_nsec = 0; /* Global timers */ pthread_mutex_lock(&global_lock); // TODO: there is a risk that if timers re-insert themselves to // the top of the list with a 0 offset we could loop indefinitely #if 0 tvhdebug(LS_GTIMER, "now %"PRItime_t, ts.tv_sec); LIST_FOREACH(gti, >imers, gti_link) tvhdebug(LS_GTIMER, " gti %p expire %"PRItimet, gti, gti->gti_expire.tv_sec); #endif while((gti = LIST_FIRST(>imers)) != NULL) { if (gti->gti_expire > now) { ts.tv_sec = gti->gti_expire; break; } #if ENABLE_GTIMER_CHECK id = gti->gti_id; #else id = NULL; #endif tprofile_start(>imer_profile, id); cb = gti->gti_callback; LIST_REMOVE(gti, gti_link); gti->gti_callback = NULL; cb(gti->gti_opaque); tprofile_finish(>imer_profile); } /* Wait */ pthread_cond_timedwait(>imer_cond, &global_lock, &ts); pthread_mutex_unlock(&global_lock); } }
/**
 * Code for dealing with a complete section: hand the CA section to
 * every descrambler attached to the service owning this elementary
 * stream.
 */
static void
got_ca_section(const uint8_t *data, size_t len, void *opaque)
{
  elementary_stream_t *st = opaque;
  mpegts_service_t *svc;
  th_descrambler_t *des;

  /* CA sections only arrive on MPEG-TS sourced services. */
  assert(st->es_service->s_source_type == S_MPEG_TS);
  svc = (mpegts_service_t *)st->es_service;

  LIST_FOREACH(des, &svc->s_descramblers, td_service_link)
    des->td_table(des, (service_t *)svc, st, data, len);
}
/*
 * Reset every PHY hanging off the MII bus, then (re)select the media.
 * Returns the result of mii_mediachg().
 */
static int
ed_pccard_kick_phy(struct ed_softc *sc)
{
	struct mii_data *mii_bus;
	struct mii_softc *phy;

	mii_bus = device_get_softc(sc->miibus);
	LIST_FOREACH(phy, &mii_bus->mii_phys, mii_list)
		PHY_RESET(phy);
	return (mii_mediachg(mii_bus));
}
/*
 * compat debug stuff: print tid, flags and name of every thread while
 * holding the thread-list lock.
 */
void
_thread_dump_info(void)
{
	pthread_t t;

	_spinlock(&_thread_lock);
	LIST_FOREACH(t, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    t->tid, t->flags, t->name);
	_spinunlock(&_thread_lock);
}
/*
 * Applies the mask 'mask' without checking for empty sets or permissions,
 * then recursively propagates the narrowed mask to every child set.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *child;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(child, &set->cs_children, cs_siblings)
		cpuset_update(child, &set->cs_mask);
}
/*
 * Check the hash chains for stray dquot's: assert that no dquot in any
 * bucket still references the vnode being flushed.
 */
static void
dqflush(struct vnode *vp)
{
	struct dquot *dqp;
	int idx;

	mutex_enter(&dqlock);
	for (idx = 0; idx <= dqhash; idx++) {
		LIST_FOREACH(dqp, &dqhashtbl[idx], dq_hash)
			KASSERT(dqp->dq_ump->um_quotas[dqp->dq_type] != vp);
	}
	mutex_exit(&dqlock);
}
/*
 * GC mark hook for IoMessage: mark the message name, cached result,
 * every argument, and the next/label links so the collector retains
 * them.
 */
void IoMessage_mark(IoMessage *self)
{
	IoObject_shouldMarkIfNonNull(DATA(self)->name);
	IoObject_shouldMarkIfNonNull(DATA(self)->cachedResult);

	if (DATA(self)->args)
	{
		/* Io's LIST_FOREACH(list, index, value, expr) form. */
		LIST_FOREACH(DATA(self)->args, i, v, IoObject_shouldMark(v));
	}

	IoObject_shouldMarkIfNonNull((IoObject *)DATA(self)->next);
	IoObject_shouldMarkIfNonNull((IoObject *)DATA(self)->label);
}
/*
 * Destroy a host object: free all queued output chunks, destroy the
 * socket, and release the address and the object itself.
 */
static void host_destroy(host_t* self)
{
	struct output_chunk *chunk, *next_chunk;

	/*
	 * Free the output FIFO safely.  The previous LIST_FOREACH body
	 * freed the current chunk and then the macro dereferenced it to
	 * advance — a use-after-free.  Fetch the successor before freeing.
	 */
	chunk = LIST_FIRST(&self->output_fifo);
	while (chunk != NULL) {
		next_chunk = LIST_NEXT(chunk, chunks_list);
		free(chunk);
		chunk = next_chunk;
	}

	if (self->socket)
		self->socket->destroy(self->socket);
	if (self->addr)
		free(self->addr);
	free(self);
}
/*
 * Add multiplex backed by a file: create the logical mux once, then a
 * physical instance of it for every registered tsfile input (tuner).
 */
void
tsfile_add_file ( const char *path )
{
  mpegts_mux_t *mm;
  mpegts_input_t *mi;

  tvhtrace("tsfile", "add file %s", path);

  /* Create logical instance */
  mm = tsfile_mux_create(&tsfile_network);

  /* Create physical instance (for each tuner) */
  LIST_FOREACH(mi, &tsfile_inputs, mi_global_link)
    tsfile_mux_instance_create(path, mi, mm);
}
/*
 * Debug helper: print every hash item that lands in the bucket the
 * given key hashes to.
 */
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
	struct drm_hash_item_list *bucket;
	struct drm_hash_item *item;
	unsigned int hashed;
	int count = 0;

	hashed = hash32_buf(&key, sizeof(key), ht->order);
	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed);
	bucket = &ht->table[hashed & ht->mask];
	LIST_FOREACH(item, bucket, head)
		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, item->key);
}
/*
 * Reset the lexer to its initial state: free every token produced so
 * far, clear the position/token stacks, and rewind the scan cursor to
 * the start of the source buffer.
 */
void IoLexer_clear(IoLexer *self)
{
	/* Io's LIST_FOREACH(list, index, value, expr): free each token. */
	LIST_FOREACH(self->tokenStream, i, t, IoToken_free((IoToken *)t) );
	List_removeAll(self->tokenStream);

	Stack_clear(self->posStack);
	Stack_clear(self->tokenStack);

	self->current = self->s;    /* rewind to start of source */
	self->resultIndex = 0;
	self->maxChar = 0;
	self->errorToken = NULL;
}
/*
 * ifmedia change callback: when more than one PHY instance is attached,
 * reset every PHY first, then program the newly selected media.
 */
int
cas_mediachange(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *phy;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(phy, &mii->mii_phys, mii_list)
			mii_phy_reset(phy);
	}

	return (mii_mediachg(mii));
}
/*
 * Stop the master: stop accepting new connections, shut down every
 * inactive peer socket, and ask the event loop to exit within one
 * second.
 */
static void
shutdown_master(void)
{
	struct scan_peer *peer;
	struct timeval delay;

	event_del(&listen_event);
	(void)close(listen_event_socket);

	LIST_FOREACH(peer, &inactive_peers, peer_link)
		(void)shutdown(peer->fd, SHUT_RDWR);

	delay.tv_sec = 1;
	delay.tv_usec = 0;
	event_loopexit(&delay);
}
/*
 * Detach a streaming target from a pad and recompute the pad's
 * aggregate reject filter from the targets that remain attached.
 */
void
streaming_target_disconnect(streaming_pad_t *sp, streaming_target_t *st)
{
	int filter;

	sp->sp_ntargets--;
	st->st_pad = NULL;

	LIST_REMOVE(st, st_link);

	/*
	 * NOTE: 'st' is deliberately reused as the iterator below; the
	 * caller's target has already been unlinked, so it is not visited.
	 * The pad's filter is the AND of all remaining targets' reject
	 * filters (starting from all-ones) — presumably a message type is
	 * dropped only if every target rejects it; confirm against users
	 * of sp_reject_filter.
	 */
	filter = ~0;
	LIST_FOREACH(st, &sp->sp_targets, st_link)
		filter &= st->st_reject_filter;
	sp->sp_reject_filter = filter;
}
/*
 * Debug helper: vprint() every smbnode in every bucket of the
 * per-mount node hash table.  Always returns 0.
 */
int smbfs_hashprint(struct mount *mp)
{
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode *np;
	int bucket;

	for (bucket = 0; bucket <= smp->sm_hashlen; bucket++) {
		LIST_FOREACH(np, &smp->sm_hash[bucket], n_hash)
			vprint(NULL, SMBTOV(np));
	}
	return 0;
}