/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 *
 * Credit-based shaping: the instance accrues bandwidth * elapsed-time
 * credit, packets are dequeued and charged their scaled length until
 * credit runs out or the scheduler is empty, and each dequeued packet
 * is moved onto the instance's delay line.  If credit went negative,
 * the instance is re-inserted into the event heap to be served again
 * once enough credit has accrued.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;	/* fallback queue when caller passes q == NULL */
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	/* remember whether the delay line was empty before we append to it */
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	/* caller supplied no queue: use a throwaway local one */
	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;	/* cleared here; set again below if rescheduled */

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;	/* no bandwidth limit: never accumulate credit */
	si->sched_time = now;
	done = 0;
	/*
	 * Drain the scheduler while credit lasts.  After the loop 'm' still
	 * points at the LAST dequeued packet (or NULL if none) — the
	 * credit < 0 branch below relies on that to fix its output time.
	 */
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;
		done++;
		/*
		 * Cost of the packet: payload bits plus per-packet framing
		 * overhead, multiplied by hz — presumably to express the cost
		 * in the same tick-based unit as the accrued credit; TODO
		 * confirm units against the rest of the module.  bw == 0
		 * means unshaped, so the packet costs nothing.
		 */
		len_scaled = (bw == 0) ? 0 : hz *
			(m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet in the delay line */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay ;
		mq_append(&si->dline.mq, m);
	}
	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT (bw > 0, ("bw=0 and credit<0 ?"));
		/* ticks until credit returns to >= 0, rounded up */
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	/* delay line just became non-empty: kick off its first transmit event */
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}
        /*
         * NOTE(review): tail of a function whose beginning lies outside
         * this view.  From what is visible it selects one of two VLC
         * table sets based on whether quant < 13, then decodes one
         * orientation symbol from the bitstream — confirm against the
         * full function.
         */
        table_index = get_bits(&s->gb, 1+(w->quant<13) );
        w->j_orient_vlc = &j_orient_vlc[w->quant<13][table_index];
    }
    assert(w->j_orient_vlc);
    assert(w->j_orient_vlc->table);

    return get_vlc2(&s->gb, w->j_orient_vlc->table, OR_VLC_BITS, OR_VLC_MTD);
}

/*
 * Each ac_decode_table entry packs four fields into a uint32_t
 * (field positions follow from the shift amounts below):
 *   bits  0..7  : number of extra bits to read from the bitstream
 *   bits  8..15 : flag — per the macro names, 0xFF marks the extra bits
 *                 as extending the run, 0x00 as extending the level
 *   bits 16..23 : offset added to the run
 *   bits 24..31 : offset added to the level
 */
#define extra_bits(eb) (eb)
#define extra_run (0xFF<<8)
#define extra_level (0x00<<8)
#define run_offset(r) ((r)<<16)
#define level_offset(l) ((l)<<24)

/*
 * Escape-code parameters for AC coefficient decoding.  The /*nn*-style
 * comments number the table entries (indices presumably used by the
 * decoder's escape handling — verify against the caller).
 * NOTE(review): the table continues past the end of this view; it is
 * truncated here, not malformed.
 */
static const uint32_t ac_decode_table[]={
  /*46*/ extra_bits(3) | extra_run   | run_offset(16) | level_offset( 0),
  /*47*/ extra_bits(3) | extra_run   | run_offset(24) | level_offset( 0),
  /*48*/ extra_bits(2) | extra_run   | run_offset( 4) | level_offset( 1),
  /*49*/ extra_bits(3) | extra_run   | run_offset( 8) | level_offset( 1),

  /*50*/ extra_bits(5) | extra_run   | run_offset(32) | level_offset( 0),
  /*51*/ extra_bits(4) | extra_run   | run_offset(16) | level_offset( 1),

  /*52*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 4),
  /*53*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 8),
  /*54*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset(12),
  /*55*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset(16),
  /*56*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset(24),

  /*57*/ extra_bits(2) | extra_level | run_offset( 1) | level_offset( 3),
  /*58*/ extra_bits(3) | extra_level | run_offset( 1) | level_offset( 7),