void
linux_idr_module_fini(void)
{

    KASSERT(SIMPLEQ_EMPTY(&idr_cache.discarded_nodes));
    KASSERT(SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes));
    mutex_destroy(&idr_cache.lock);
}
static void
workqueue_worker(void *cookie)
{
    struct workqueue *wq = cookie;
    struct workqueue_queue *q;

    /* find the workqueue of this kthread */
    q = workqueue_queue_lookup(wq, curlwp->l_cpu);

    for (;;) {
        struct workqhead tmp;

        /*
         * we violate abstraction of SIMPLEQ.
         */
#if defined(DIAGNOSTIC)
        tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

        mutex_enter(&q->q_mutex);
        while (SIMPLEQ_EMPTY(&q->q_queue))
            cv_wait(&q->q_cv, &q->q_mutex);
        tmp.sqh_first = q->q_queue.sqh_first; /* XXX */
        SIMPLEQ_INIT(&q->q_queue);
        mutex_exit(&q->q_mutex);

        workqueue_runlist(wq, &tmp);
    }
}
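/*
 * workqueue_runlist() is not shown above.  A minimal sketch of what the
 * worker expects it to do, assuming struct work is linked through a
 * wk_entry member (as in workqueue_finiqueue below) and that struct
 * workqueue carries wq_func/wq_arg callback fields (wq_arg is an
 * assumption here): walk the detached batch and hand each item to the
 * user callback.
 */
static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
    struct work *wk, *next;

    for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
        /* the callback may free wk, so fetch the link first */
        next = SIMPLEQ_NEXT(wk, wk_entry);
        (*wq->wq_func)(wk, wq->wq_arg);
    }
}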
void
remove_vnode_from_name_tree(struct fuse *f, struct fuse_vnode *vn)
{
    struct fuse_vn_head *vn_head;
    struct fuse_vnode *v;
    struct fuse_vnode *lastv;

    vn_head = dict_get(&f->name_tree, vn->path);
    if (vn_head == NULL)
        return;

    lastv = NULL;
    SIMPLEQ_FOREACH(v, vn_head, node) {
        if (v->parent == vn->parent)
            break;
        lastv = v;
    }
    if (v == NULL)
        return;

    /* if we found the vnode remove it */
    if (v == SIMPLEQ_FIRST(vn_head))
        SIMPLEQ_REMOVE_HEAD(vn_head, node);
    else
        SIMPLEQ_REMOVE_AFTER(vn_head, lastv, node);

    /* if the queue is empty we need to remove it from the dict */
    if (SIMPLEQ_EMPTY(vn_head)) {
        vn_head = dict_pop(&f->name_tree, vn->path);
        free(vn_head);
    }
}
void *
datalogger_thread(void *queue_ptr)
{
    struct da_entry *dae;
    struct s_datalog_entry *dle = NULL;
    char timestamp[16];

    log_debug("starting datalogger thread");

    while (1) {
        dae = NULL;    /* reset so a spurious wakeup cannot reuse a freed entry */

        pthread_mutex_lock(&da_mutex);
        pthread_cond_wait(&da_cond, &da_mutex);
        if (!(SIMPLEQ_EMPTY(&da_head))) {
            dae = SIMPLEQ_FIRST(&da_head);
            SIMPLEQ_REMOVE_HEAD(&da_head, da_entries);
        }
        pthread_mutex_unlock(&da_mutex);

        if (dae != NULL) {
            if (!(dle = (struct s_datalog_entry *)malloc(sizeof(struct s_datalog_entry)))) {
                log_error("datalogger_thread: malloc failed");
                free(dae);    /* skip this entry rather than dereference a NULL dle */
                continue;
            }

            struct tm *tm_now = localtime(&dae->timestamp);
            strftime(timestamp, sizeof(timestamp), "%Y%m%d%H%M%S", tm_now);

            (void)snprintf(dle->line, sizeof(dle->line),
                "%s,%d,%.02f,%.02f,%.02f,%.02f,%.02f,%d,%.02f,%.02f\n",
                timestamp,
                dae->values->host_id,
                dae->values->temperature,
                dae->values->pressure,
                dae->values->humidity,
                dae->values->light,
                dae->values->wind_speed,
                (unsigned int)dae->values->wind_direction,
                dae->values->wind_chill,
                dae->values->rainfall);

            datalogger_write(dle);

            free(dae);
            free(dle);
        }
    }

    return 0;
}
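/*
 * The drain loop above only makes sense next to its producer.  A minimal
 * sketch of the enqueue side, assuming the same da_head/da_mutex/da_cond
 * globals and da_entries linkage; the function name datalogger_enqueue is
 * hypothetical.  Insert under the mutex, then signal the waiting consumer.
 */
void
datalogger_enqueue(struct da_entry *dae)
{
    pthread_mutex_lock(&da_mutex);
    SIMPLEQ_INSERT_TAIL(&da_head, dae, da_entries);
    pthread_cond_signal(&da_cond);    /* wake the datalogger thread */
    pthread_mutex_unlock(&da_mutex);
}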
/*
 * Put a CCB onto the freelist.
 */
static void
cac_ccb_free(struct cac_softc *sc, struct cac_ccb *ccb)
{

    KASSERT(mutex_owned(&sc->sc_mutex));

    ccb->ccb_flags = 0;
    if (SIMPLEQ_EMPTY(&sc->sc_ccb_free))
        cv_signal(&sc->sc_ccb_cv);
    SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
}
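/*
 * For context, the matching allocation path is the usual freelist pop.
 * A hedged sketch only (field names as above; the _sketch suffix marks it
 * as hypothetical, not the driver's actual routine): sleep on sc_ccb_cv
 * while the freelist is empty, then take the head entry.  This is the
 * condition cac_ccb_free signals when the list transitions from empty.
 */
static struct cac_ccb *
cac_ccb_alloc_sketch(struct cac_softc *sc)
{
    struct cac_ccb *ccb;

    KASSERT(mutex_owned(&sc->sc_mutex));

    while (SIMPLEQ_EMPTY(&sc->sc_ccb_free))
        cv_wait(&sc->sc_ccb_cv, &sc->sc_mutex);
    ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
    SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
    return ccb;
}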
static void
cleanum_msg_list(void)
{
    struct msg_elem *notification;

    pthread_mutex_lock(&msglock);
    while (!SIMPLEQ_EMPTY(&notifymsgs)) {
        notification = SIMPLEQ_FIRST(&notifymsgs);
        SIMPLEQ_REMOVE_HEAD(&notifymsgs, next);
        free(notification);
    }
    nrmsgs = 0;
    pthread_mutex_unlock(&msglock);
}
void *
graphite_thread(void *queue_ptr)
{
    struct ga_entry *gae;
    struct s_graphite_entry *entry = NULL;
    // char buffer[1024];
    char timestamp[11];

    log_debug("starting graphite thread");

    while (1) {
        gae = NULL;    /* reset so a spurious wakeup cannot reuse a freed entry */

        pthread_mutex_lock(&ga_mutex);
        pthread_cond_wait(&ga_cond, &ga_mutex);
        if (!(SIMPLEQ_EMPTY(&ga_head))) {
            gae = SIMPLEQ_FIRST(&ga_head);
            SIMPLEQ_REMOVE_HEAD(&ga_head, ga_entries);
        }
        pthread_mutex_unlock(&ga_mutex);

        if (gae != NULL) {
            struct tm *tm_now = localtime(&gae->timestamp);
            /* "%s" (seconds since the epoch) is a glibc/BSD strftime extension */
            strftime(timestamp, sizeof(timestamp), "%s", tm_now);

            if (!(entry = (struct s_graphite_entry *)malloc(sizeof(struct s_graphite_entry)))) {
                log_error("graphite_thread: malloc failed");
                free(gae);    /* skip this entry rather than dereference a NULL entry */
                continue;
            }

            entry->host_id = gae->values->host_id;
            entry->temperature = gae->values->temperature;
            entry->pressure = gae->values->pressure;
            entry->c_pressure = gae->values->c_pressure;
            entry->humidity = gae->values->humidity;
            entry->light = gae->values->light;
            entry->wind_speed = gae->values->wind_speed;
            entry->wind_direction = gae->values->wind_direction;
            entry->wind_chill = gae->values->wind_chill;
            entry->rainfall = gae->values->rainfall;
            entry->timestamp = timestamp;    /* points at the stack buffer; valid only for this iteration */

            graphite_write(entry);

            free(entry);
            free(gae);
        }
    }

    return 0;
}
static void
workqueue_exit(struct work *wk, void *arg)
{
    struct workqueue_exitargs *wqe = (void *)wk;
    struct workqueue_queue *q = wqe->wqe_q;

    /*
     * only competition at this point is workqueue_finiqueue.
     */
    KASSERT(q->q_worker == curlwp);
    KASSERT(SIMPLEQ_EMPTY(&q->q_queue));

    mutex_enter(&q->q_mutex);
    q->q_worker = NULL;
    cv_signal(&q->q_cv);
    mutex_exit(&q->q_mutex);

    kthread_exit(0);
}
STATIC int
cardslotdetach(device_t self, int flags)
{
    int rc;
    struct cardslot_softc *sc = device_private(self);

    if ((rc = config_detach_children(self, flags)) != 0)
        return rc;

    sc->sc_th_enable = 0;
    wakeup(&sc->sc_events);
    while (sc->sc_event_thread != NULL)
        (void)tsleep(sc, PWAIT, "cardslotthd", 0);

    if (!SIMPLEQ_EMPTY(&sc->sc_events))
        aprint_error_dev(self, "events outstanding");

    pmf_device_deregister(self);
    return 0;
}
static void
workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q)
{
    struct workqueue_exitargs wqe;

    KASSERT(wq->wq_func == workqueue_exit);

    wqe.wqe_q = q;
    KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
    KASSERT(q->q_worker != NULL);

    mutex_enter(&q->q_mutex);
    SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
    cv_signal(&q->q_cv);
    while (q->q_worker != NULL) {
        cv_wait(&q->q_cv, &q->q_mutex);
    }
    mutex_exit(&q->q_mutex);

    mutex_destroy(&q->q_mutex);
    cv_destroy(&q->q_cv);
}
/*
 * Create a link socket.
 */
sockid_t
lnksock_socket(int type, int protocol, struct sock ** sockp,
    const struct sockevent_ops ** ops)
{
    struct lnksock *lnk;

    if (type != SOCK_DGRAM)
        return EPROTOTYPE;

    if (protocol != 0)
        return EPROTONOSUPPORT;

    if (SIMPLEQ_EMPTY(&lnk_freelist))
        return ENOBUFS;

    lnk = SIMPLEQ_FIRST(&lnk_freelist);
    SIMPLEQ_REMOVE_HEAD(&lnk_freelist, lnk_next);

    *sockp = &lnk->lnk_sock;
    *ops = &lnksock_ops;
    return SOCKID_LNK | (sockid_t)(lnk - lnk_array);
}
bool
fsd_equeue_empty(void)
{

    return (SIMPLEQ_EMPTY(&fsd_equeue_head));
}
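/*
 * All of these helpers presuppose the usual <sys/queue.h> boilerplate.
 * A minimal sketch of what a head such as fsd_equeue_head looks like;
 * the fsd_event member names below are assumptions for illustration,
 * not the real definitions.
 */
struct fsd_event {
    SIMPLEQ_ENTRY(fsd_event) fe_entries;    /* linkage inside the queue (assumed name) */
    /* ... payload fields ... */
};

SIMPLEQ_HEAD(fsd_equeue, fsd_event) fsd_equeue_head =
    SIMPLEQ_HEAD_INITIALIZER(fsd_equeue_head);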
static int
xbd_response_handler(void *arg)
{
    struct buf *bp;
    struct xbd_softc *xs;
    blk_ring_resp_entry_t *ring_resp;
    struct xbdreq *pxr, *xr;
    int i;

    for (i = resp_cons; i != blk_ring->resp_prod; i = BLK_RING_INC(i)) {
        ring_resp = &blk_ring->ring[MASK_BLK_IDX(i)].resp;
        xr = (struct xbdreq *)ring_resp->id;
        pxr = xr->xr_parent;

        DPRINTF(XBDB_IO, ("xbd_response_handler(%d): pxr %p xr %p "
            "bdone %04lx breq %04lx\n", i, pxr, xr, pxr->xr_bdone,
            xr->xr_breq));
        pxr->xr_bdone -= xr->xr_breq;
        DIAGCONDPANIC(pxr->xr_bdone < 0,
            ("xbd_response_handler: pxr->xr_bdone < 0"));

        if (__predict_false(ring_resp->status)) {
            pxr->xr_bp->b_flags |= B_ERROR;
            pxr->xr_bp->b_error = EIO;
        }

        if (xr != pxr) {
            PUT_XBDREQ(xr);
            if (!SIMPLEQ_EMPTY(&xbdr_suspended))
                xbdresume();
        }

        if (pxr->xr_bdone == 0) {
            bp = pxr->xr_bp;
            xs = getxbd_softc(bp->b_dev);
            if (xs == NULL) { /* don't fail bp if we're shutdown */
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
            }
            DPRINTF(XBDB_IO, ("xbd_response_handler(%d): "
                "completed bp %p\n", i, bp));
            if (bp->b_flags & B_ERROR)
                bp->b_resid = bp->b_bcount;
            else
                bp->b_resid = 0;

            if (pxr->xr_aligned)
                unmap_align(pxr);

            PUT_XBDREQ(pxr);
            if (xs) {
                disk_unbusy(&xs->sc_dksc.sc_dkdev,
                    (bp->b_bcount - bp->b_resid),
                    (bp->b_flags & B_READ));
#if NRND > 0
                rnd_add_uint32(&xs->rnd_source, bp->b_blkno);
#endif
            }
            biodone(bp);
            if (!SIMPLEQ_EMPTY(&xbdr_suspended))
                xbdresume();
            /* XXX possible lockup if this was the only
             * active device and requests were held back in
             * the queue.
             */
            if (xs)
                dk_iodone(xs->sc_di, &xs->sc_dksc);
        }
    }
    resp_cons = i;

    /* check if xbdresume queued any requests */
    if (last_req_prod != req_prod)
        signal_requests_to_xen();
    return 0;
}
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rtk_txeof(struct rtk_softc *sc)
{
    struct ifnet *ifp;
    struct rtk_tx_desc *txd;
    uint32_t txstat;

    ifp = &sc->ethercom.ec_if;

    /*
     * Go through our tx list and free mbufs for those
     * frames that have been uploaded.
     */
    while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
        txstat = CSR_READ_4(sc, txd->txd_txstat);
        if ((txstat & (RTK_TXSTAT_TX_OK|
            RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
            break;

        SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);

        bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
            txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
        m_freem(txd->txd_mbuf);
        txd->txd_mbuf = NULL;

        ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;

        if (txstat & RTK_TXSTAT_TX_OK)
            ifp->if_opackets++;
        else {
            ifp->if_oerrors++;

            /*
             * Increase Early TX threshold if underrun occurred.
             * Increase step 64 bytes.
             */
            if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
#ifdef DEBUG
                printf("%s: transmit underrun;",
                    device_xname(sc->sc_dev));
#endif
                if (sc->sc_txthresh < RTK_TXTH_MAX) {
                    sc->sc_txthresh += 2;
#ifdef DEBUG
                    printf(" new threshold: %d bytes",
                        sc->sc_txthresh * 32);
#endif
                }
#ifdef DEBUG
                printf("\n");
#endif
            }
            if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
                CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
        }
        SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
        ifp->if_flags &= ~IFF_OACTIVE;
    }

    /* Clear the timeout timer if there is no pending packet. */
    if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
        ifp->if_timer = 0;
}
// Called by ms_scan either in a thread or synchronously
static void *do_scan(void *userdata) {
  MediaScan *s = ((thread_data_type *)userdata)->s;
  int i;
  struct dirq *dir_head = (struct dirq *)s->_dirq;
  struct dirq_entry *dir_entry = NULL;
  struct fileq *file_head = NULL;
  struct fileq_entry *file_entry = NULL;
  char tmp_full_path[MAX_PATH_STR_LEN];

  // Initialize the cache database
  if (!init_bdb(s)) {
    MediaScanError *e = error_create("", MS_ERROR_CACHE, "Unable to initialize libmediascan cache");
    send_error(s, e);
    goto out;
  }

  if (s->flags & MS_CLEARDB) {
    reset_bdb(s);
  }

  if (s->progress == NULL) {
    MediaScanError *e = error_create("", MS_ERROR_TYPE_INVALID_PARAMS, "Progress object not created");
    send_error(s, e);
    goto out;
  }

  // Build a list of all directories and paths
  // We do this first so we can present an accurate scan eta later
  progress_start_phase(s->progress, "Discovering");

  for (i = 0; i < s->npaths; i++) {
    LOG_INFO("Scanning %s\n", s->paths[i]);
    recurse_dir(s, s->paths[i], 0);
  }

  // Scan all files found
  progress_start_phase(s->progress, "Scanning");

  while (!SIMPLEQ_EMPTY(dir_head)) {
    dir_entry = SIMPLEQ_FIRST(dir_head);

    file_head = dir_entry->files;
    while (!SIMPLEQ_EMPTY(file_head)) {
      // check if the scan has been aborted
      if (s->_want_abort) {
        LOG_DEBUG("Aborting scan\n");
        goto aborted;
      }

      file_entry = SIMPLEQ_FIRST(file_head);

      // Construct full path
      strcpy(tmp_full_path, dir_entry->dir);
#ifdef WIN32
      strcat(tmp_full_path, "\\");
#else
      strcat(tmp_full_path, "/");
#endif
      strcat(tmp_full_path, file_entry->file);

      ms_scan_file(s, tmp_full_path, file_entry->type);

      // Send progress update if necessary
      if (s->on_progress) {
        s->progress->done++;
        if (progress_update(s->progress, tmp_full_path))
          send_progress(s);
      }

      SIMPLEQ_REMOVE_HEAD(file_head, entries);
      free(file_entry->file);
      free(file_entry);
    }

    SIMPLEQ_REMOVE_HEAD(dir_head, entries);
    free(dir_entry->dir);
    free(dir_entry->files);
    free(dir_entry);
  }

  // Send final progress callback
  if (s->on_progress) {
    progress_update(s->progress, NULL);
    send_progress(s);
  }

  LOG_DEBUG("Finished scanning\n");

out:
  if (s->on_finish)
    send_finish(s);

aborted:
  if (s->async) {
    LOG_MEM("destroy thread_data @ %p\n", userdata);
    free(userdata);
  }

  return NULL;
}
/*
 * UBSEC Interrupt routine
 */
int
ubsec_intr(void *arg)
{
    struct ubsec_softc *sc = arg;
    volatile u_int32_t stat;
    struct ubsec_q *q;
    struct ubsec_dma *dmap;
    u_int16_t flags;
    int npkts = 0, i;

    stat = READ_REG(sc, BS_STAT);

    if ((stat & (BS_STAT_MCR1_DONE|BS_STAT_MCR2_DONE|BS_STAT_MCR4_DONE|
        BS_STAT_DMAERR)) == 0)
        return (0);

    stat &= sc->sc_statmask;
    WRITE_REG(sc, BS_STAT, stat);        /* IACK */

    /*
     * Check to see if we have any packets waiting for us
     */
    if ((stat & BS_STAT_MCR1_DONE)) {
        while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
            q = SIMPLEQ_FIRST(&sc->sc_qchip);
            dmap = q->q_dma;

            if ((dmap->d_dma->d_mcr.mcr_flags &
                htole16(UBS_MCR_DONE)) == 0)
                break;

            SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);

            npkts = q->q_nstacked_mcrs;
            /*
             * search for further sc_qchip ubsec_q's that share
             * the same MCR, and complete them too, they must be
             * at the top.
             */
            for (i = 0; i < npkts; i++) {
                if (q->q_stacked_mcr[i])
                    ubsec_callback(sc, q->q_stacked_mcr[i]);
                else
                    break;
            }
            ubsec_callback(sc, q);
        }

        /*
         * Don't send any more packet to chip if there has been
         * a DMAERR.
         */
        if (!(stat & BS_STAT_DMAERR))
            ubsec_feed(sc);
    }

    /*
     * Check to see if we have any key setups/rng's waiting for us
     */
    if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
        (stat & BS_STAT_MCR2_DONE)) {
        struct ubsec_q2 *q2;
        struct ubsec_mcr *mcr;

        while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
            q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

            bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
                0, q2->q_mcr.dma_map->dm_mapsize,
                BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

            mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;

            /* A bug in new devices requires to swap this field */
            if (sc->sc_flags & UBS_FLAGS_MULTIMCR)
                flags = swap16(mcr->mcr_flags);
            else
                flags = mcr->mcr_flags;

            if ((flags & htole16(UBS_MCR_DONE)) == 0) {
                bus_dmamap_sync(sc->sc_dmat,
                    q2->q_mcr.dma_map, 0,
                    q2->q_mcr.dma_map->dm_mapsize,
                    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
                break;
            }
            SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next);
            ubsec_callback2(sc, q2);
            /*
             * Don't send any more packet to chip if there has been
             * a DMAERR.
             */
            if (!(stat & BS_STAT_DMAERR))
                ubsec_feed2(sc);
        }
    }
    if ((sc->sc_flags & UBS_FLAGS_RNG4) && (stat & BS_STAT_MCR4_DONE)) {
        struct ubsec_q2 *q2;
        struct ubsec_mcr *mcr;

        while (!SIMPLEQ_EMPTY(&sc->sc_qchip4)) {
            q2 = SIMPLEQ_FIRST(&sc->sc_qchip4);

            bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
                0, q2->q_mcr.dma_map->dm_mapsize,
                BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

            mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;

            /* A bug in new devices requires to swap this field */
            flags = swap16(mcr->mcr_flags);

            if ((flags & htole16(UBS_MCR_DONE)) == 0) {
                bus_dmamap_sync(sc->sc_dmat,
                    q2->q_mcr.dma_map, 0,
                    q2->q_mcr.dma_map->dm_mapsize,
                    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
                break;
            }
            SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip4, q_next);
            ubsec_callback2(sc, q2);
            /*
             * Don't send any more packet to chip if there has been
             * a DMAERR.
             */
            if (!(stat & BS_STAT_DMAERR))
                ubsec_feed4(sc);
        }
    }

    /*
     * Check to see if we got any DMA Error
     */
    if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
        volatile u_int32_t a = READ_REG(sc, BS_ERR);

        printf("%s: dmaerr %s@%08x\n", sc->sc_dv.dv_xname,
            (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);
#endif /* UBSEC_DEBUG */
        ubsecstats.hst_dmaerr++;
        ubsec_totalreset(sc);
        ubsec_feed(sc);
    }

    return (1);
}
int
ubsec_process(struct cryptop *crp)
{
    struct ubsec_q *q = NULL;
    int card, err = 0, i, j, s, nicealign;
    struct ubsec_softc *sc;
    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
    int encoffset = 0, macoffset = 0, cpskip, cpoffset;
    int sskip, dskip, stheend, dtheend;
    int16_t coffset;
    struct ubsec_session *ses, key;
    struct ubsec_dma *dmap = NULL;
    u_int16_t flags = 0;
    int ivlen = 0, keylen = 0;

    if (crp == NULL || crp->crp_callback == NULL) {
        ubsecstats.hst_invalid++;
        return (EINVAL);
    }
    card = UBSEC_CARD(crp->crp_sid);
    if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL) {
        ubsecstats.hst_invalid++;
        return (EINVAL);
    }

    sc = ubsec_cd.cd_devs[card];

    s = splnet();

    if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
        ubsecstats.hst_queuefull++;
        splx(s);
        err = ENOMEM;
        goto errout2;
    }

    q = SIMPLEQ_FIRST(&sc->sc_freequeue);
    SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
    splx(s);

    dmap = q->q_dma; /* Save dma pointer */
    bzero(q, sizeof(struct ubsec_q));
    bzero(&key, sizeof(key));

    q->q_sesn = UBSEC_SESSION(crp->crp_sid);
    q->q_dma = dmap;
    ses = &sc->sc_sessions[q->q_sesn];

    if (crp->crp_flags & CRYPTO_F_IMBUF) {
        q->q_src_m = (struct mbuf *)crp->crp_buf;
        q->q_dst_m = (struct mbuf *)crp->crp_buf;
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        q->q_src_io = (struct uio *)crp->crp_buf;
        q->q_dst_io = (struct uio *)crp->crp_buf;
    } else {
        err = EINVAL;
        goto errout;    /* XXX we don't handle contiguous blocks! */
    }

    bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

    dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
    dmap->d_dma->d_mcr.mcr_flags = 0;
    q->q_crp = crp;

    crd1 = crp->crp_desc;
    if (crd1 == NULL) {
        err = EINVAL;
        goto errout;
    }
    crd2 = crd1->crd_next;

    if (crd2 == NULL) {
        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC) {
            maccrd = crd1;
            enccrd = NULL;
        } else if (crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC) {
            maccrd = NULL;
            enccrd = crd1;
        } else {
            err = EINVAL;
            goto errout;
        }
    } else {
        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
            (crd2->crd_alg == CRYPTO_3DES_CBC ||
            crd2->crd_alg == CRYPTO_AES_CBC) &&
            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
            maccrd = crd1;
            enccrd = crd2;
        } else if ((crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC) &&
            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
            crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
            (crd1->crd_flags & CRD_F_ENCRYPT)) {
            enccrd = crd1;
            maccrd = crd2;
        } else {
            /*
             * We cannot order the ubsec as requested
             */
            err = EINVAL;
            goto errout;
        }
    }

    if (enccrd) {
        if (enccrd->crd_alg == CRYPTO_AES_CBC) {
            if ((sc->sc_flags & UBS_FLAGS_AES) == 0) {
                err = EINVAL;
                goto errout;
            }
            flags |= htole16(UBS_PKTCTX_ENC_AES);
            switch (enccrd->crd_klen) {
            case 128:
            case 192:
            case 256:
                keylen = enccrd->crd_klen / 8;
                break;
            default:
                err = EINVAL;
                goto errout;
            }
            ivlen = 16;
        } else {
            flags |= htole16(UBS_PKTCTX_ENC_3DES);
            ivlen = 8;
            keylen = 24;
        }

        encoffset = enccrd->crd_skip;

        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                bcopy(enccrd->crd_iv, key.ses_iv, ivlen);
            else
                arc4random_buf(key.ses_iv, ivlen);

            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                if (crp->crp_flags & CRYPTO_F_IMBUF)
                    err = m_copyback(q->q_src_m,
                        enccrd->crd_inject,
                        ivlen, key.ses_iv, M_NOWAIT);
                else if (crp->crp_flags & CRYPTO_F_IOV)
                    cuio_copyback(q->q_src_io,
                        enccrd->crd_inject,
                        ivlen, key.ses_iv);
                if (err)
                    goto errout;
            }
        } else {
            flags |= htole16(UBS_PKTCTX_INBOUND);

            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                bcopy(enccrd->crd_iv, key.ses_iv, ivlen);
            else if (crp->crp_flags & CRYPTO_F_IMBUF)
                m_copydata(q->q_src_m, enccrd->crd_inject,
                    ivlen, (caddr_t)key.ses_iv);
            else if (crp->crp_flags & CRYPTO_F_IOV)
                cuio_copydata(q->q_src_io,
                    enccrd->crd_inject, ivlen,
                    (caddr_t)key.ses_iv);
        }

        for (i = 0; i < (keylen / 4); i++)
            key.ses_key[i] = ses->ses_key[i];
        for (i = 0; i < (ivlen / 4); i++)
            SWAP32(key.ses_iv[i]);
    }

    if (maccrd) {
        macoffset = maccrd->crd_skip;

        if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
            flags |= htole16(UBS_PKTCTX_AUTH_MD5);
        else
            flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

        for (i = 0; i < 5; i++) {
            key.ses_hminner[i] = ses->ses_hminner[i];
            key.ses_hmouter[i] = ses->ses_hmouter[i];

            HTOLE32(key.ses_hminner[i]);
            HTOLE32(key.ses_hmouter[i]);
        }
    }

    if (enccrd && maccrd) {
        /*
         * ubsec cannot handle packets where the end of encryption
         * and authentication are not the same, or where the
         * encrypted part begins before the authenticated part.
         */
        if (((encoffset + enccrd->crd_len) !=
            (macoffset + maccrd->crd_len)) ||
            (enccrd->crd_skip < maccrd->crd_skip)) {
            err = EINVAL;
            goto errout;
        }
        sskip = maccrd->crd_skip;
        cpskip = dskip = enccrd->crd_skip;
        stheend = maccrd->crd_len;
        dtheend = enccrd->crd_len;
        coffset = enccrd->crd_skip - maccrd->crd_skip;
        cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
        printf("mac: skip %d, len %d, inject %d\n",
            maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
        printf("enc: skip %d, len %d, inject %d\n",
            enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
        printf("src: skip %d, len %d\n", sskip, stheend);
        printf("dst: skip %d, len %d\n", dskip, dtheend);
        printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
            coffset, stheend, cpskip, cpoffset);
#endif
    } else {
        cpskip = dskip = sskip = macoffset + encoffset;
        dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
        cpoffset = cpskip + dtheend;
        coffset = 0;
    }

    if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
        0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
        err = ENOMEM;
        goto errout;
    }
    if (crp->crp_flags & CRYPTO_F_IMBUF) {
        if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
            q->q_src_m, BUS_DMA_NOWAIT) != 0) {
            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
            q->q_src_map = NULL;
            err = ENOMEM;
            goto errout;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
            q->q_src_io, BUS_DMA_NOWAIT) != 0) {
            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
            q->q_src_map = NULL;
            err = ENOMEM;
            goto errout;
        }
    }
    nicealign = ubsec_dmamap_aligned(q->q_src_map);

    dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
    printf("src skip: %d\n", sskip);
#endif
    for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
        struct ubsec_pktbuf *pb;
        bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
        bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;

        if (sskip >= packl) {
            sskip -= packl;
            continue;
        }

        packl -= sskip;
        packp += sskip;
        sskip = 0;

        if (packl > 0xfffc) {
            err = EIO;
            goto errout;
        }

        if (j == 0)
            pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
        else
            pb = &dmap->d_dma->d_sbuf[j - 1];

        pb->pb_addr = htole32(packp);

        if (stheend) {
            if (packl > stheend) {
                pb->pb_len = htole32(stheend);
                stheend = 0;
            } else {
                pb->pb_len = htole32(packl);
                stheend -= packl;
            }
        } else
            pb->pb_len = htole32(packl);

        if ((i + 1) == q->q_src_map->dm_nsegs)
            pb->pb_next = 0;
        else
            pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
                offsetof(struct ubsec_dmachunk, d_sbuf[j]));
        j++;
    }

    if (enccrd == NULL && maccrd != NULL) {
        dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
        dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
        dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
            htole32(dmap->d_alloc.dma_paddr +
            offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
        printf("opkt: %x %x %x\n",
            dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
            dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
            dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
    } else {
/*
 * ubsec_feed() - aggregate and post requests to chip
 * It is assumed that the caller set splnet()
 */
void
ubsec_feed(struct ubsec_softc *sc)
{
#ifdef UBSEC_DEBUG
    static int max;
#endif /* UBSEC_DEBUG */
    struct ubsec_q *q, *q2;
    int npkts, i;
    void *v;
    u_int32_t stat;

    npkts = sc->sc_nqueue;
    if (npkts > sc->sc_maxaggr)
        npkts = sc->sc_maxaggr;
    if (npkts < 2)
        goto feed1;

    if ((stat = READ_REG(sc, BS_STAT)) &
        (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
        if (stat & BS_STAT_DMAERR) {
            ubsec_totalreset(sc);
            ubsecstats.hst_dmaerr++;
        }
        return;
    }

#ifdef UBSEC_DEBUG
    printf("merging %d records\n", npkts);

    /* XXX temporary aggregation statistics reporting code */
    if (max < npkts) {
        max = npkts;
        printf("%s: new max aggregate %d\n", sc->sc_dv.dv_xname, max);
    }
#endif /* UBSEC_DEBUG */

    q = SIMPLEQ_FIRST(&sc->sc_queue);
    SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
    --sc->sc_nqueue;

    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
    if (q->q_dst_map != NULL)
        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

    q->q_nstacked_mcrs = npkts - 1;        /* Number of packets stacked */

    for (i = 0; i < q->q_nstacked_mcrs; i++) {
        q2 = SIMPLEQ_FIRST(&sc->sc_queue);
        bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
            0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
        if (q2->q_dst_map != NULL)
            bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
                0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
        SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
        --sc->sc_nqueue;

        v = ((char *)&q2->q_dma->d_dma->d_mcr) +
            sizeof(struct ubsec_mcr) - sizeof(struct ubsec_mcr_add);
        bcopy(v, &q->q_dma->d_dma->d_mcradd[i],
            sizeof(struct ubsec_mcr_add));
        q->q_stacked_mcr[i] = q2;
    }
    q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
    SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
    bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
        0, q->q_dma->d_alloc.dma_map->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
        offsetof(struct ubsec_dmachunk, d_mcr));
    return;

feed1:
    while (!SIMPLEQ_EMPTY(&sc->sc_queue)) {
        if ((stat = READ_REG(sc, BS_STAT)) &
            (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
            if (stat & BS_STAT_DMAERR) {
                ubsec_totalreset(sc);
                ubsecstats.hst_dmaerr++;
            }
            break;
        }

        q = SIMPLEQ_FIRST(&sc->sc_queue);

        bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
            0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
        if (q->q_dst_map != NULL)
            bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
                0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
        bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
            0, q->q_dma->d_alloc.dma_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
            offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
        printf("feed: q->chip %p %08x\n", q,
            (u_int32_t)q->q_dma->d_alloc.dma_paddr);
#endif /* UBSEC_DEBUG */
        SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
        --sc->sc_nqueue;
        SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
    }
}
void recurse_dir(MediaScan *s, const char *path, int recurse_count) {
  char *dir, *p;
  char tmp_full_path[MAX_PATH_STR_LEN];
  DIR *dirp;
  struct dirent *dp;
  struct dirq *subdirq;                     // list of subdirs of the current directory
  struct dirq_entry *parent_entry = NULL;   // entry for current dir in s->_dirq
  char redirect_dir[MAX_PATH_STR_LEN];

  if (recurse_count > RECURSE_LIMIT) {
    LOG_ERROR("Hit recurse limit of %d scanning path %s\n", RECURSE_LIMIT, path);
    return;
  }

  if (path[0] != '/') {         // XXX Win32
    // Get full path
    char *buf = (char *)malloc((size_t)MAX_PATH_STR_LEN);
    if (buf == NULL) {
      FATAL("Out of memory for directory scan\n");
      return;
    }

    dir = getcwd(buf, (size_t)MAX_PATH_STR_LEN);
    strcat(dir, "/");
    strcat(dir, path);
  }
  else {
#ifdef USING_TCMALLOC
    // strdup will cause tcmalloc to crash on free
    dir = (char *)malloc((size_t)MAX_PATH_STR_LEN);
    strcpy(dir, path);
#else
    dir = strdup(path);
#endif
  }

  // Strip trailing slash if any
  p = &dir[0];
  while (*p != 0) {
    if (p[1] == 0 && *p == '/')
      *p = 0;
    p++;
  }

  LOG_INFO("Recursed into %s\n", dir);

#if defined(__APPLE__)
  if (isAlias(dir)) {
    if (CheckMacAlias(dir, redirect_dir)) {
      LOG_INFO("Resolving Alias %s to %s\n", dir, redirect_dir);
      strcpy(dir, redirect_dir);
    }
    else {
      LOG_ERROR("Failure to follow symlink or alias, skipping directory\n");
      goto out;
    }
  }
#elif defined(__linux__)
  if (isAlias(dir)) {
    FollowLink(dir, redirect_dir);
    LOG_INFO("Resolving symlink %s to %s\n", dir, redirect_dir);
    strcpy(dir, redirect_dir);
  }
#endif

  if ((dirp = opendir(dir)) == NULL) {
    LOG_ERROR("Unable to open directory %s: %s\n", dir, strerror(errno));
    goto out;
  }

  subdirq = malloc(sizeof(struct dirq));
  SIMPLEQ_INIT(subdirq);

  while ((dp = readdir(dirp)) != NULL) {
    char *name = dp->d_name;

    // skip all dot files
    if (name[0] != '.') {
      // Check if scan should be aborted
      if (unlikely(s->_want_abort))
        break;

      // XXX some platforms may be missing d_type/DT_DIR
      if (dp->d_type == DT_DIR) {
        // Add to list of subdirectories we need to recurse into
        struct dirq_entry *subdir_entry = malloc(sizeof(struct dirq_entry));

        // Construct full path
        strcpy(tmp_full_path, dir);
        strcat(tmp_full_path, "/");
        strcat(tmp_full_path, name);

        if (_should_scan_dir(s, tmp_full_path)) {
          subdir_entry->dir = strdup(tmp_full_path);
          SIMPLEQ_INSERT_TAIL(subdirq, subdir_entry, entries);
          LOG_INFO(" subdir: %s\n", tmp_full_path);
        }
        else {
          LOG_INFO(" skipping subdir: %s\n", tmp_full_path);
        }
      }
      else {
        enum media_type type = _should_scan(s, name);

        LOG_INFO("name %s = type %d\n", name, type);

        if (type) {
          struct fileq_entry *entry;

          // Check if this file is a shortcut and if so resolve it
#if defined(__APPLE__)
          if (isAlias(name)) {
            char full_name[MAX_PATH_STR_LEN];

            LOG_INFO("Mac Alias detected\n");

            strcpy(full_name, dir);
            strcat(full_name, "\\");
            strcat(full_name, name);
            parse_lnk(full_name, redirect_dir, MAX_PATH_STR_LEN);
            if (PathIsDirectory(redirect_dir)) {
              struct dirq_entry *subdir_entry = malloc(sizeof(struct dirq_entry));

              subdir_entry->dir = strdup(redirect_dir);
              SIMPLEQ_INSERT_TAIL(subdirq, subdir_entry, entries);
              LOG_INFO(" subdir: %s\n", tmp_full_path);
              type = 0;
            }
          }
#elif defined(__linux__)
          if (isAlias(name)) {
            char full_name[MAX_PATH_STR_LEN];

            printf("Linux Alias detected\n");

            strcpy(full_name, dir);
            strcat(full_name, "\\");
            strcat(full_name, name);
            FollowLink(full_name, redirect_dir);
            if (PathIsDirectory(redirect_dir)) {
              struct dirq_entry *subdir_entry = malloc(sizeof(struct dirq_entry));

              subdir_entry->dir = strdup(redirect_dir);
              SIMPLEQ_INSERT_TAIL(subdirq, subdir_entry, entries);
              LOG_INFO(" subdir: %s\n", tmp_full_path);
              type = 0;
            }
          }
#endif

          if (parent_entry == NULL) {
            // Add parent directory to list of dirs with files
            parent_entry = malloc(sizeof(struct dirq_entry));
            parent_entry->dir = strdup(dir);
            parent_entry->files = malloc(sizeof(struct fileq));
            SIMPLEQ_INIT(parent_entry->files);
            SIMPLEQ_INSERT_TAIL((struct dirq *)s->_dirq, parent_entry, entries);
          }

          // Add scannable file to this directory list
          entry = malloc(sizeof(struct fileq_entry));
          entry->file = strdup(name);
          entry->type = type;
          SIMPLEQ_INSERT_TAIL(parent_entry->files, entry, entries);

          s->progress->total++;

          LOG_INFO(" [%5d] file: %s\n", s->progress->total, entry->file);
        }
      }
    }
  }

  closedir(dirp);

  // Send progress update
  if (s->on_progress && !s->_want_abort)
    if (progress_update(s->progress, dir))
      send_progress(s);

  // process subdirs
  while (!SIMPLEQ_EMPTY(subdirq)) {
    struct dirq_entry *subdir_entry = SIMPLEQ_FIRST(subdirq);
    SIMPLEQ_REMOVE_HEAD(subdirq, entries);
    if (!s->_want_abort)
      recurse_dir(s, subdir_entry->dir, recurse_count + 1); // bump the depth, else RECURSE_LIMIT can never trigger
    free(subdir_entry->dir);  // strdup'd above; freeing only the entry leaked it
    free(subdir_entry);
  }

  free(subdirq);

out:
  free(dir);
}
void *
packet_thread(void *queue_ptr)
{
    struct rp_entry *rpe = NULL;
    struct pa_entry *pae = NULL;
    struct s_packet *packet = NULL;
    char buf[255];
    int last_checksum = 0;
    int same_checksum = 0;

    log_debug("starting packet thread");

    while (1) {
        pthread_mutex_lock(&rp_mutex);
        pthread_cond_wait(&rp_cond, &rp_mutex);
        if (!(SIMPLEQ_EMPTY(&rp_head))) {
            rpe = SIMPLEQ_FIRST(&rp_head);
            SIMPLEQ_REMOVE_HEAD(&rp_head, rp_entries);

            /* process_packet is assumed to return a freshly allocated
             * s_packet; the extra malloc that used to precede this call
             * was immediately overwritten and leaked one s_packet per
             * payload. */
            if ((packet = process_packet(rpe->payload)) == NULL) {
                printf("packet_thread: process_packet failed\n");
                pthread_mutex_unlock(&rp_mutex);
                return NULL;
            }
            free(rpe);

            if (!(pae = (struct pa_entry *)malloc(sizeof(struct pa_entry)))) {
                printf("packet_thread: pa_entry malloc failed\n");
                pthread_mutex_unlock(&rp_mutex);
                return NULL;
            }
            /* ownership of packet moves to pae; freeing it here (as the
             * old code did) would leave pae->packet dangling */
            pae->packet = packet;

            if (pae->packet->checksum != last_checksum) {
                last_checksum = pae->packet->checksum;
                same_checksum = 0;
            } else {
                same_checksum += 1;
            }
            /*
            printf("checksum: %d\n", pae->packet->checksum);
            printf("last_checksum: %d\n", last_checksum);
            printf("same_checksum: %d\n", same_checksum);
            */

            if (same_checksum <= MAX_SAME_CHECKSUM) {
                pthread_mutex_lock(&pa_mutex);
                SIMPLEQ_INSERT_TAIL(&pa_head, pae, pa_entries);
                pthread_cond_signal(&pa_cond);
                pthread_mutex_unlock(&pa_mutex);
            } else {
                (void)snprintf(buf, sizeof(buf),
                    "found the same values over %d samples",
                    MAX_SAME_CHECKSUM);
                log_info(buf);
                free(pae->packet);    /* entry is dropped, not queued */
                free(pae);
                gpio_reset();
                sleep(10);
                reset_serial();
            }
        }
        pthread_mutex_unlock(&rp_mutex);
    }

    return 0;
}