/*
 * Reset the Sonics Silicon Backplane core: disable it, bring it back up
 * with clock + forced-gated-clock + reset asserted, clear any latched
 * error state, and finally release reset in two steps.
 */
static void ssb_core_reset(struct b44_private *bp)
{
	u32 val;
	const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

	ssb_core_disable(bp);

	/* Re-enable the core with reset still asserted and a forced clock. */
	bw32(bp, B44_SBTMSLOW, mask);
	bflush(bp, B44_SBTMSLOW, 1);

	/* Clear SERR if set, this is a hw bug workaround. */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	/* Clear any latched inband error state before releasing reset. */
	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_BAD)) {
		bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
	}

	/* Deassert reset while keeping the forced clock ... */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	bflush(bp, B44_SBTMSLOW, 1);

	/* ... then drop FGC, leaving the core running on its normal clock. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	bflush(bp, B44_SBTMSLOW, 1);
}
static buf_s *victim (void) { buf_s *buf; unint wrap = 0; for (;;) { ++Clock; if (Clock == &Buf[Num_bufs]) { Clock = Buf; if (++wrap > 2) { bdump(); fatal("All buffers in use"); return NULL; } } buf = Clock; if (buf->b_use < 0) { if (buf->b_ant.a_state == ANT_FLUSHING) continue; bflush(buf); aver(buf->b_ant.a_state != ANT_FLUSHING); aver(buf->b_ant.a_state != ANT_DIRTY); buf->b_use = 1; rmv(buf); return buf; } if (buf->b_use == 0) { buf->b_use = -1; } } }
/* * Return a count of the number of B_BUSY buffers in the system * Can only be used as a good estimate. If 'cleanit' is set, * try to flush all bufs. */ int bio_busy(int cleanit) { struct buf *bp, *dp; int busy = 0; int i; kmutex_t *hmp; for (i = 0; i < v.v_hbuf; i++) { vfs_syncprogress(); dp = (struct buf *)&hbuf[i]; hmp = &hbuf[i].b_lock; mutex_enter(hmp); for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) { if (bp->b_flags & B_BUSY) busy++; } mutex_exit(hmp); } if (cleanit && busy != 0) { bflush(NODEV); } return (busy); }
/*
 * Kernel 'sync': write modified superblocks and inodes back to disk,
 * then start I/O on all dirty buffers.  Pre-standard C (note the old
 * `=|` compound-assignment spelling and untyped `register *bp`);
 * left byte-identical apart from comments.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register *bp;

	/* updlock serializes concurrent syncs; bail if one is in progress. */
	if(updlock)
		return;
	updlock++;

	/* Write back each mounted filesystem's modified superblock,
	 * skipping ones that are locked or mounted read-only. */
	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if(mp->m_bufp != NULL) {
			ip = mp->m_bufp->b_addr;
			if(ip->s_fmod==0 || ip->s_ilock!=0 ||
			   ip->s_flock!=0 || ip->s_ronly!=0)
				continue;
			/* Block 1 holds the superblock on this filesystem. */
			bp = getblk(mp->m_dev, 1);
			ip->s_fmod = 0;
			ip->s_time[0] = time[0];
			ip->s_time[1] = time[1];
			bcopy(ip, bp->b_addr, 256);
			bwrite(bp);
		}

	/* Flush every unlocked in-core inode to disk. */
	for(ip = &inode[0]; ip < &inode[NINODE]; ip++)
		if((ip->i_flag&ILOCK) == 0) {
			ip->i_flag =| ILOCK;	/* old-style |= */
			iupdat(ip, time);
			prele(ip);
		}

	updlock = 0;
	/* Start write-back of all dirty buffers on every device. */
	bflush(NODEV);
}
/*
 * Exercise the in-memory fastbuf: one writer and one cloned reader over
 * the same stream.  Prints positions (<n> for the writer path, <!n>
 * after explicit seeks) interleaved with the data read back.
 */
int main(void)
{
	struct fastbuf *wr, *rd;
	int c;

	wr = fbmem_create(7);
	rd = fbmem_clone_read(wr);

	/* Write twice; show writer position before and after the flush. */
	bwrite(wr, "12345", 5);
	bwrite(wr, "12345", 5);
	printf("<%d>", (int)btell(wr));
	bflush(wr);
	printf("<%d>", (int)btell(wr));

	/* Drain everything currently visible to the reader. */
	printf("<%d>", (int)btell(rd));
	for (;;) {
		c = bgetc(rd);
		if (c < 0)
			break;
		putchar(c);
	}
	printf("<%d>", (int)btell(rd));

	/* Append more data, then close the writer side. */
	bwrite(wr, "12345", 5);
	bwrite(wr, "12345", 5);
	printf("<%d>", (int)btell(wr));
	bclose(wr);

	/* Re-read from the start ... */
	bsetpos(rd, 0);
	printf("<!%d>", (int)btell(rd));
	for (;;) {
		c = bgetc(rd);
		if (c < 0)
			break;
		putchar(c);
	}

	/* ... and again from offset 3. */
	bsetpos(rd, 3);
	printf("<!%d>", (int)btell(rd));
	for (;;) {
		c = bgetc(rd);
		if (c < 0)
			break;
		putchar(c);
	}

	putchar('\n');
	fflush(stdout);
	bclose(rd);
	return 0;
}
/** Poll for completed and received packets
 *
 * @v netdev Network device
 */
static void b44_poll(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 istat;		/* Interrupt status */

	istat = br32(bp, B44_ISTAT);
	istat &= IMASK_DEF;	/* only the events we care about */

	if (!istat)
		return;

	if (istat & ISTAT_TX)
		b44_tx_complete(bp);
	if (istat & ISTAT_RX)
		b44_process_rx_packets(bp);
	if (istat & ISTAT_ERRORS) {
		DBG("b44 error istat=0x%08x\n", istat);

		/* Reset B44 core partially to avoid long waits:
		 * mask IRQs, halt the MAC, rebuild both DMA rings,
		 * and re-init hardware without touching the PHY. */
		b44_irq(bp->netdev, 0);
		b44_halt(bp);
		b44_init_tx_ring(bp);
		b44_init_rx_ring(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
	}

	/* Acknowledge interrupt */
	bw32(bp, B44_ISTAT, 0);
	bflush(bp, B44_ISTAT, 1);
}
static buf_s *victim (void) { buf_s *buf; unint wrap = 0; for (;;) { ++Clock; if (Clock == &Buf[Num_bufs]) { Clock = Buf; if (++wrap > 2) { bdump(); eprintf("All buffers in use"); return NULL; } } buf = Clock; if (buf->b_use < 0) { bflush(buf); buf->b_use = 1; unhash(buf); return buf; } if (buf->b_use == 0) { buf->b_use = -1; } } }
/*
 * Take the SSB core out of service: ask the backplane to stop routing
 * transactions to it, wait until it goes idle, then assert reset.
 */
static void ssb_core_disable(struct b44_private *bp)
{
	/* Already held in reset -- nothing to do. */
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	/* Reject new backplane transactions, then wait for the reject to
	 * take effect and for the core to drop its busy flag. */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

	/* Assert reset with a forced clock so the core latches it ... */
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK | SSB_CORE_DOWN));
	bflush(bp, B44_SBTMSLOW, 1);

	/* ... then remove the clock, leaving the core down. */
	bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
	bflush(bp, B44_SBTMSLOW, 1);
}
/*
 * Print the (possibly partial) satisfying assignment as DIMACS
 * "v ..." literal lines, terminated by the literal 0, then flush
 * whatever remains buffered.
 */
static void printa (PicoSAT * picosat, int partial)
{
  int nvars = picosat_variables (picosat);
  int idx;

  assert (bhead == buffer);

  for (idx = 1; idx <= nvars; idx++)
    {
      int value;

      if (partial)
	{
	  value = picosat_deref_partial (picosat, idx);
	  if (!value)
	    continue;		/* variable unassigned in partial model */
	}
      else
	value = picosat_deref (picosat, idx);

      printi (value > 0 ? idx : -idx);
    }

  printi (0);			/* DIMACS end-of-model marker */

  if (bhead > buffer)
    bflush ();
}
/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access.  We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessary long time in
 * this case.
 *
 * Called-by: b44_close, b44_halt, b44_inithw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
	if (ssb_is_core_up(bp)) {
		/* Core already running: quiesce the MAC and both DMA
		 * engines before resetting. */
		bw32(bp, B44_RCV_LAZY, 0);

		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_dirty = bp->tx_cur = 0;

		/* Wait for RX DMA to go idle before stopping it. */
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
			             100, 0);
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_cur = 0;
	} else {
		/* Core is down: route the ENET0 interrupt and power up. */
		ssb_pci_setup(bp, SBINTVEC_ENET0);
	}

	ssb_core_reset(bp);

	/* Don't enable PHY if we are only doing a partial reset. */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
	                         (0x0d & MDIO_CTRL_MAXF_MASK)));
	bflush(bp, B44_MDIO_CTRL, 1);

	/* Enable internal or external PHY */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		bflush(bp, B44_ENET_CTRL, 1);
	} else {
		u32 val = br32(bp, B44_DEVCTRL);
		if (val & DEVCTRL_EPR) {
			/* Take the internal PHY out of reset. */
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			bflush(bp, B44_DEVCTRL, 100);
		}
	}
}
/*
 * Drop one reference to a held buffer.  Unless the owning device
 * allows lazy (deferred) write-back, the buffer is flushed before the
 * reference count is decremented.
 */
void bput (buf_s *buf)
{
	aver(buf->b_use > 0);

	if (buf->b_dev->d_lazy == 0) {
		bflush(buf);
	}
	buf->b_use--;
}
/*
 * Close a fastbuf: flush pending output, then invoke the backend's
 * close hook if it has one.  A NULL fastbuf is ignored.
 */
void bclose(struct fastbuf *f)
{
	if (!f)
		return;
	bflush(f);
	if (f->close)
		f->close(f);
}
/* {{{ sapi_apache_flush */
/*
 * Push any buffered output for the current Apache request out to the
 * client.  server_context is the request_rec for this SAPI request;
 * a NULL context is silently ignored.
 */
static void sapi_apache_flush(void *server_context)
{
	if (server_context) {
#if MODULE_MAGIC_NUMBER > 19970110
		/* Modern Apache: flush via the request record. */
		rflush((request_rec *) server_context);
#else
		/* Pre-1997 Apache: flush the client connection buffer.
		 * NOTE(review): this branch reads ->connection through a
		 * void * before casting -- verify against those old headers
		 * if this path is ever built. */
		bflush((request_rec *) server_context->connection->client);
#endif
	}
}
/*
 * Write back every buffer in the pool that belongs to the given tree.
 */
void bsync (tree_s *tree)
{
	buf_s *bp;
	buf_s *end = &Buf[Num_bufs];

	for (bp = Buf; bp < end; bp++) {
		if (bp->b_tree != tree)
			continue;
		bflush(bp);
	}
}
/*
 * Detach the sf (simple framing) protocol from its underlying
 * bytestream: perform the termination handshake, tear down both worker
 * coroutines and channels, and return the underlying socket handle.
 * On error returns -1 with errno set and the underlying socket closed.
 */
int sfdetach(int s, int64_t deadline) {
    int err = 0;
    struct sf *conn = msockdata(s, sf_type);
    if(dill_slow(!conn)) return -1;
    /* If connection is broken don't even try to do termination handshake. */
    if(conn->res == SF_RESET) {err = ECONNRESET; goto dealloc;}
    /* Ask oworker to exit (a NULL-buffer message is the stop request). */
    struct msg msg = {NULL, 0};
    int rc = chsend(conn->ochan, &msg, sizeof(msg), deadline);
    if(dill_slow(rc < 0 && errno == EPIPE)) {err = ECONNRESET; goto dealloc;}
    if(dill_slow(rc < 0)) {err = errno; goto dealloc;}
    /* Given that there's no way for oworker to receive this second
       message, this send can only exit when oworker closes the channel
       -- i.e. it acts as a join on the worker. */
    rc = chsend(conn->ochan, &msg, sizeof(msg), deadline);
    dill_assert(rc < 0);
    if(dill_slow(errno != EPIPE)) {err = errno; goto dealloc;}
    if(dill_slow(conn->ores == SF_RESET)) {err = ECONNRESET; goto dealloc;}
    dill_assert(conn->ores == SF_DONE);
    /* Now that oworker has exited, send the termination sequence. */
    rc = bsend(conn->u, &sf_termsequence, sizeof(sf_termsequence), -1);
    if(dill_slow(rc < 0)) {err = errno; goto dealloc;}
    rc = bflush(conn->u, deadline);
    if(dill_slow(rc < 0)) {err = errno; goto dealloc;}
    /* Read and drop any pending inbound messages. By doing this we'll
       ensure that reading on the underlying socket will continue from
       the first byte following the sf termination sequence. */
    if(conn->res == SF_ACTIVE) {
        while(1) {
            struct msg msg;
            rc = chrecv(conn->ichan, &msg, sizeof(msg), deadline);
            if(rc < 0) break;
            free(msg.buf);
        }
        if(dill_slow(errno != EPIPE)) {err = errno; goto dealloc;}
        if(dill_slow(conn->ires == SF_RESET)) {err = ECONNRESET; goto dealloc;}
        dill_assert(conn->ires == SF_DONE);
    }
dealloc:
    /* Deallocate the object (workers first, then channels). */
    rc = hclose(conn->iworker);
    dill_assert(rc == 0);
    rc = hclose(conn->oworker);
    dill_assert(rc == 0);
    rc = hclose(conn->ichan);
    dill_assert(rc == 0);
    rc = hclose(conn->ochan);
    dill_assert(rc == 0);
    int u = conn->u;
    free(conn);
    /* Success: hand the underlying socket back to the caller. */
    if(err == 0) return u;
    /* Failure: the underlying socket is unusable -- close it too. */
    rc = hclose(u);
    dill_assert(rc == 0);
    errno = err;
    return -1;
}
/*
 * Expand all 16 wall-tile variants: for each value of ix (0..15) the
 * bits appear to encode which neighbouring walls are present, and the
 * matching horizontal/vertical wall pieces are composited over the
 * floor into the draw buffer, then blitted out.
 * NOTE(review): the prototype comment below lists 4 parameters but the
 * copy_wall2_h1 calls pass 3 -- the comment is likely stale; confirm
 * against the helper's definition.
 */
void expand_wall2(){
	//void copy_wall2_h(int kind, int ix, int xofs, int yofs)
	int ix;
	exp_wall=1;
	for(ix=0;ix<16;ix++){
		clr_dbuf();
		/* Base floor layer (32- or 64-wide variant). */
		if(f_wx==32)cp_floor_32();
		else cp_floor_64();
		/* Horizontal wall pieces, selected by bit pairs of ix. */
		if((ix&3)==0) copy_wall2_h1(0, 0, 8);
		if((ix&3)==1) copy_wall2_h1(1, -16, 0);
		if((ix&3)==2) copy_wall2_h1(3, 16, 0);
		if((ix&3)==3) copy_wall2_h2(0, 0, 0);
		if((ix&5)==0) copy_wall2_h1(6, 16, 0);
		if((ix&5)==1) copy_wall2_h1(7, 0, -8);
		if((ix&5)==4) copy_wall2_h1(3, 0, 8);
		if((ix&5)==5) copy_wall2_h2(2, 0, 0);
		if((ix&10)==0) copy_wall2_h1(2, -16, 0);
		if((ix&10)==2) copy_wall2_h1(5, 0, -8);
		if((ix&10)==8) copy_wall2_h1(1, 0, 8);
		if((ix&10)==10) copy_wall2_h2(1, 0, 0);
		if((ix&12)==0) copy_wall2_h1(8, 0, -8);
		if((ix&12)==4) copy_wall2_h1(5, -16, 0);
		if((ix&12)==8) copy_wall2_h1(7, 16, 0);
		if((ix&12)==12) copy_wall2_h2(3, 0, 0);
		/* Vertical wall pieces layered on top. */
		if((ix&5)==5) copy_wall_v2(0, 0, 0, 0);
		if((ix&10)==10) copy_wall_v2(4, 0, 0, 0);
		if((ix&4)!=0) copy_wall_v2(1, 0, 0, 0);
		if((ix&8)!=0) copy_wall_v2(3, 0, 0, 0);
		if((ix&12)==12) copy_wall_v2(2, 0, 0, 0);
		if((ix&5)==1) copy_wall_v2(1, 1, 0, -8);
		if((ix&12)==8) copy_wall_v2(1, 1, 16, 0);
		if((ix&10)==2) copy_wall_v2(3, 1, 0, -8);
		if((ix&12)==4) copy_wall_v2(3, 1, -16, 0);
		if((ix&5)==0) copy_wall_v2(0, 1, 16, 0);
		if((ix&10)==0) copy_wall_v2(4, 1, -16, 0);
		if((ix&12)==0) copy_wall_v2(2, 1, 0, -8);
		/* Emit the composed tile and advance the output cursor. */
		bflush();
		bx++;
		if(bx==xx0){bx=0;by++;}
	}
}
/*
 * Set the stream position.  If the target still lies inside the
 * current buffer we just move the buffer pointer; otherwise we flush
 * and do a real backend seek.  Dies if the stream cannot seek.
 */
inline void bsetpos(struct fastbuf *f, ucw_off_t pos)
{
  /* We can optimize seeks only when reading */
  ucw_off_t buf_start = f->pos - (f->bstop - f->buffer);

  if (pos >= buf_start && pos <= f->pos)
    {
      f->bptr = f->bstop + (pos - f->pos);
      return;
    }

  bflush(f);
  if (!f->seek || !f->seek(f, pos, SEEK_SET))
    die("bsetpos: stream not seekable");
}
/*
 * Release one reference to the buffer containing *data*.  Unless the
 * global lazy-write mode is enabled, the buffer is written back before
 * the reference count drops.
 */
void bput (void *data)
{
	buf_s *b = addr2buf(data);

	aver(b->b_use > 0);
	if (!Lazy) {
		bflush(b);
	}
	aver(b->b_use > 0);
	b->b_use--;
}
/*
 * Reposition a buffered stream.  Seeks on the input fd (adjusting a
 * relative offset for chars still buffered), else on the output fd
 * (after flushing), else -- for purely in-memory streams -- just moves
 * the buffer cursors.  Returns 0 on success, the negative seek error
 * otherwise (also recorded in stream->error).
 */
int fseek(FILE *stream,int offset,uint whence)
{
	int ret;

	if(stream->in.fd >= 0) {
		/* A relative seek must be reduced by the number of chars
		 * still waiting in the read buffer. */
		if(whence == SEEK_CUR && stream->in.pos < stream->in.max)
			offset -= stream->in.max - stream->in.pos;
		/* Discard the read buffer. */
		stream->in.pos = stream->in.max;
		/* A paired output stream has to be flushed first. */
		if(stream->out.fd >= 0)
			bflush(stream);
		ret = seek(stream->in.fd,offset,whence);
	}
	else if(stream->out.fd >= 0) {
		/* Output-only stream: flush, then seek. */
		bflush(stream);
		ret = seek(stream->out.fd,offset,whence);
	}
	else {
		/* No backing fd: operate on the buffer cursors alone. */
		switch(whence) {
			case SEEK_CUR:
				stream->in.pos += offset;
				stream->out.pos += offset;
				break;
			case SEEK_SET:
				stream->in.pos = offset;
				stream->out.pos = offset;
				break;
			default:
				stream->in.pos = stream->in.max;
				stream->out.pos = stream->out.max;
				break;
		}
		ret = 0;
	}

	if(ret >= 0)
		return 0;
	stream->error = ret;
	return ret;
}
/*
 * Return the total length of the stream without disturbing the
 * caller's position.  A NULL fastbuf yields 0; a failed seek-to-end
 * yields -1.
 */
ucw_off_t bfilesize(struct fastbuf *f)
{
  if (!f)
    return 0;

  ucw_off_t saved = btell(f);	/* remember where the caller was */
  bflush(f);
  if (!f->seek(f, 0, SEEK_END))
    return -1;
  ucw_off_t size = btell(f);
  bsetpos(f, saved);		/* restore the original position */
  return size;
}
void main(int argc, char **argv) { int i, errs; Fontchar *fc; Bitmap *b; int nc, ht, as; Subfont *f; binit(0, 0, "font merge"); if(argc < 1) usage(); nf = argc-1; for(i = 0; i < nf; i++) snarf(argv[i+1], i); nc = ft[0].sf->n; ht = ft[0].sf->height; as = ft[0].sf->ascent; errs = 0; for(i = 0; i < nf; i++){ if(nc < ft[i].sf->n) nc = ft[i].sf->n; if(ht != ft[1].sf->height){ fprint(2, "%s: %s.height=%d (!= %s.height=%d)\n", argv[0], ft[i].name, ft[i].sf->height, ft[0].name, ht); errs = 1; } if(as != ft[1].sf->ascent){ fprint(2, "%s: %s.ascent=%d (!= %s.ascent=%d)\n", argv[0], ft[i].name, ft[i].sf->ascent, ft[0].name, ht); errs = 1; } } if(errs) exits("param mismatch"); fc = (Fontchar *)malloc(nc*sizeof(Fontchar)); b = balloc(Rect(0, 0, nc*64, ht), ft[0].bm->ldepth); if(b == 0 || fc == 0){ fprint(2, "%s: couldn't malloc %d chars\n", argv0, nc); exits("out of memory"); } bitblt(b, b->r.min, b, b->r, Zero); choose(fc, b, nc, ht, as); wrbitmapfile(1, b); bitblt(&screen, screen.r.min, b, b->r, S); bflush();sleep(5000); f = subfalloc(nc, ht, as, fc, b, ~0, ~0); wrsubfontfile(1, f); exits(0); }
/**
 * called by b44_poll in the error path
 *
 * Stops the controller: masks all interrupts, powers down the PHY,
 * then performs a partial chip reset (MAC only).
 */
static void b44_halt(struct b44_private *bp)
{
	/* disable ints */
	bw32(bp, B44_IMASK, 0);
	bflush(bp, B44_IMASK, 1);

	DBG("b44: powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

	/*
	 * Now reset the chip, but without enabling
	 * the MAC&PHY part of it.
	 * This has to be done _after_ we shut down the PHY
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/*
 * ioctl handler for the ramdisk device.
 * RDGETMEDIASIZE stores the device size into the caller's int;
 * RDREINIT flushes cached buffers and re-initialises the device.
 * Unknown commands are ignored; always returns 0.
 */
int rdioctl (dev_t dev, register u_int cmd, caddr_t addr, int flag)
{
	int *result = (int *)addr;

	switch (cmd) {
	case RDGETMEDIASIZE:
		*result = rdsize(dev);
		break;
	case RDREINIT:
		bflush(major(dev));
		init_device(major(dev), S_SILENT);
		break;
	default:
		break;
	}
	return 0;
}
struct fastbuf * fbmem_clone_read(struct fastbuf *b) { struct fastbuf *f = xmalloc_zero(sizeof(struct fb_mem)); struct memstream *s = FB_MEM(b)->stream; bflush(b); s->uc++; FB_MEM(f)->stream = s; f->name = "<fbmem-read>"; f->refill = fbmem_refill; f->seek = fbmem_seek; f->close = fbmem_close; f->can_overwrite_buffer = 1; return f; }
/*
 * Seek on a fastbuf relative to the start (SEEK_SET), current
 * position (SEEK_CUR) or end (SEEK_END) of the stream.  Dies on an
 * unseekable stream or an invalid whence value.
 *
 * FIX: the original used `return bsetpos(...);` -- a return statement
 * with an expression in a void function, which violates C11 6.8.6.4p1
 * and is rejected by conforming compilers.  The calls are now plain
 * statements; behavior is unchanged.
 */
void bseek(struct fastbuf *f, ucw_off_t pos, int whence)
{
  switch (whence)
    {
    case SEEK_SET:
      bsetpos(f, pos);
      break;
    case SEEK_CUR:
      bsetpos(f, btell(f) + pos);
      break;
    case SEEK_END:
      /* Flush so the backend sees a consistent position, then seek. */
      bflush(f);
      if (!f->seek || !f->seek(f, pos, SEEK_END))
	die("bseek: stream not seekable");
      break;
    default:
      die("bseek: invalid whence=%d", whence);
    }
}
/*
 * Attach the sf (simple framing) protocol on top of bytestream s:
 * allocate the connection object, create the in/out channels and
 * start the reader/writer coroutines, then bind everything to a new
 * socket handle.  On failure returns -1 with errno set, unwinding
 * whatever was created so far via the goto chain.
 */
int sfattach(int s) {
    int err;
    int rc;
    /* This will ensure that s is actually a bytestream. */
    rc = bflush(s, -1);
    if(dill_slow(rc < 0)) return -1;
    /* Create a sf socket. */
    struct sf *conn = malloc(sizeof(struct sf));
    if(dill_slow(!conn)) {err = ENOMEM; goto error1;}
    conn->u = s;
    conn->ochan = channel(sizeof(struct msg), 0);
    if(dill_slow(conn->ochan < 0)) {err = errno; goto error2;}
    conn->ores = SF_ACTIVE;
    conn->ichan = channel(sizeof(struct msg), 0);
    if(dill_slow(conn->ichan < 0)) {err = errno; goto error3;}
    conn->ires = SF_ACTIVE;
    conn->oworker = go(sf_oworker(conn));
    if(dill_slow(conn->oworker < 0)) {err = errno; goto error4;}
    conn->iworker = go(sf_iworker(conn));
    if(dill_slow(conn->iworker < 0)) {err = errno; goto error5;}
    conn->res = SF_ACTIVE;
    /* Bind the object to a handle. */
    int h = msock(sf_type, conn, &sf_vfptrs);
    if(dill_slow(h < 0)) {err = errno; goto error6;}
    return h;
/* Error unwinding: release resources in reverse order of creation. */
error6:
    rc = hclose(conn->iworker);
    dill_assert(rc == 0);
error5:
    rc = hclose(conn->oworker);
    dill_assert(rc == 0);
error4:
    rc = hclose(conn->ichan);
    dill_assert(rc == 0);
error3:
    rc = hclose(conn->ochan);
    dill_assert(rc == 0);
error2:
    free(conn);
error1:
    errno = err;
    return -1;
}
/*
 * Append " %d" for literal i to the output buffer, prefixing a fresh
 * buffer with 'v' (start of a DIMACS solution line).  If the formatted
 * text reaches the end-of-buffer mark, the buffer is flushed and the
 * formatting retried from scratch.
 *
 * NOTE(review): sprintf writes BEFORE the bound is checked, so the
 * region between eob and the physical end of the buffer must hold at
 * least one formatted literal -- confirm the slack where the buffer
 * and eob are set up.
 */
static void printi (int i)
{
  char *next;
  int l;

REENTER:

  /* A freshly-flushed buffer starts a new "v" value line. */
  if (bhead == buffer)
    *bhead++ = 'v';

  l = sprintf (bhead, " %d", i);
  next = bhead + l;
  if (next >= eob)
    {
      /* Out of room: flush and format this literal again. */
      bflush ();
      goto REENTER;
    }
  else
    bhead = next;
}
/*
 * Flush and invalidate all cached buffers for the device named in the
 * caller's argument block.
 * Returns 0 on success, EINVAL without filesystem-config privilege,
 * EFAULT on a bad argument size or copyin failure.
 */
static int			/* ERRNO if error, 0 if successful. */
sam_inval_dev_call(
	void *arg,		/* Pointer to arguments. */
	int size,
	cred_t *credp)
{
	sam_fsinval_arg_t args;
	dev_t rdev;

	/* Caller must hold filesystem configuration privilege. */
	if (secpolicy_fs_config(credp, NULL) != 0) {
		return (EINVAL);
	}
	/* Validate the argument block before copying it in. */
	if (size != sizeof (args)) {
		return (EFAULT);
	}
	if (copyin(arg, (caddr_t)&args, sizeof (args)) != 0) {
		return (EFAULT);
	}

	rdev = expldev(args.rdev);
	bflush(rdev);
	binval(rdev);
	return (0);
}
/*
 * Outbound worker coroutine: receives messages from ochan and writes
 * each to the underlying bytestream as an 8-byte little-endian length
 * prefix followed by the payload, flushing after every message.
 * A NULL-buffer message is the stop request; CHECKRC handles send
 * failures (sets conn->ores and breaks -- see macro definition).
 */
static coroutine void sf_oworker(struct sf *conn) {
    struct msg msg = {NULL, 0};
    while(1) {
        int rc = chrecv(conn->ochan, &msg, sizeof(msg), -1);
        if(dill_slow(rc < 0 && errno == ECANCELED)) break;
        dill_assert(rc == 0);
        /* User requests that the coroutine stops. */
        if(!msg.buf) {conn->ores = SF_DONE; break;}
        /* Frame: 64-bit length prefix, then the payload. */
        uint64_t hdr;
        dill_putll((uint8_t*)&hdr, msg.len);
        rc = bsend(conn->u, &hdr, sizeof(hdr), -1);
        CHECKRC(ores)
        rc = bsend(conn->u, msg.buf, msg.len, -1);
        CHECKRC(ores)
        free(msg.buf);
        msg.buf = NULL;
        msg.len = 0;
        rc = bflush(conn->u, -1);
        CHECKRC(ores)
    }
    /* Free any message still held when the loop exits (no-op if NULL). */
    free(msg.buf);
    int rc = chdone(conn->ochan);
    dill_assert(rc == 0);
}
/*
 * update is the internal name of
 * 'sync'. It goes through the disk
 * queues to initiate sandbagged I/O;
 * goes through the I nodes to write
 * modified nodes; and it goes through
 * the mount table to initiate modified
 * super blocks.
 */
void
update(void)
{
	struct inode *ip;
	struct mount *mp;
	struct buf *bp;
	struct filsys *fp;

	if (debugUpdate) {
		printf("----- update -----\n");
	}

	/* updlock serializes concurrent syncs; bail if one is running. */
	if(updlock)
		return;
	updlock++;

	/* Write back each mounted filesystem's modified superblock,
	 * skipping locked or read-only filesystems. */
	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if(mp->m_bufp != NULL) {
			fp = mp->m_bufp->b_un.b_filsys;
			if(fp->s_fmod==0 || fp->s_ilock!=0 ||
			   fp->s_flock!=0 || fp->s_ronly!=0)
				continue;
			bp = getblk(mp->m_dev, SUPERB);
			/* Don't write through a buffer that had an I/O error. */
			if (bp->b_flags & B_ERROR)
				continue;
			fp->s_fmod = 0;
			fp->s_time = time;
			bcopy((caddr_t)fp, bp->b_un.b_addr, BSIZE);
			bwrite(bp);
		}

	/* Flush every unlocked, referenced in-core inode.  The extra
	 * i_count reference keeps the inode alive across iupdat; iput
	 * releases both the lock and that reference. */
	for(ip = &inode[0]; ip < &inode[NINODE]; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			iupdat(ip, &time, &time);
			iput(ip);
		}

	updlock = 0;
	/* Start write-back of all dirty buffers on every device. */
	bflush(NODEV);
}