void
ccb_stat(int ac, char **av)
{
	struct si_pstat sip;
#define CCB	sip.tc_ccb

	if (ac != 0)
		prusage(U_STAT_CCB, 1);
	sip.tc_dev = tc.tc_dev;
	if (ioctl(ctlfd, TCSI_CCB, &sip) < 0)
		err(1, "TCSI_CCB on %s", Devname);
	printf("%s: ", Devname);

						/* WORD next - Next Channel */
						/* WORD addr_uart - Uart address */
						/* WORD module - address of module struct */
	printf("\tuart_type 0x%x\n", CCB.type);		/* BYTE type - Uart type */
						/* BYTE fill - */
	printf("\tx_status 0x%x %s\n", CCB.x_status,
	    s_xstat(CCB.x_status));			/* BYTE x_status - XON / XOFF status */
	printf("\tc_status 0x%x %s\n", CCB.c_status,
	    s_cstat(CCB.c_status));			/* BYTE c_status - cooking status */
	printf("\thi_rxipos 0x%x\n", CCB.hi_rxipos);	/* BYTE hi_rxipos - stuff into rx buff */
	printf("\thi_rxopos 0x%x\n", CCB.hi_rxopos);	/* BYTE hi_rxopos - stuff out of rx buffer */
	printf("\thi_txopos 0x%x\n", CCB.hi_txopos);	/* BYTE hi_txopos - Stuff into tx ptr */
	printf("\thi_txipos 0x%x\n", CCB.hi_txipos);	/* BYTE hi_txipos - ditto out */
	printf("\thi_stat 0x%x %s\n", CCB.hi_stat,
	    s_stat(CCB.hi_stat));			/* BYTE hi_stat - Command register */
	printf("\tdsr_bit 0x%x\n", CCB.dsr_bit);	/* BYTE dsr_bit - Magic bit for DSR */
	printf("\ttxon 0x%x\n", CCB.txon);		/* BYTE txon - TX XON char */
	printf("\ttxoff 0x%x\n", CCB.txoff);		/* BYTE txoff - ditto XOFF */
	printf("\trxon 0x%x\n", CCB.rxon);		/* BYTE rxon - RX XON char */
	printf("\trxoff 0x%x\n", CCB.rxoff);		/* BYTE rxoff - ditto XOFF */
	printf("\thi_mr1 0x%x %s\n", CCB.hi_mr1,
	    s_mr1(CCB.hi_mr1));				/* BYTE hi_mr1 - mode 1 image */
	printf("\thi_mr2 0x%x %s\n", CCB.hi_mr2,
	    s_mr2(CCB.hi_mr2));				/* BYTE hi_mr2 - mode 2 image */
	printf("\thi_csr 0x%x in:%s out:%s\n", CCB.hi_csr,
	    s_clk(CCB.hi_csr >> 4),
	    s_clk(CCB.hi_csr));				/* BYTE hi_csr - clock register */
	printf("\thi_op 0x%x %s\n", CCB.hi_op,
	    s_op(CCB.hi_op));				/* BYTE hi_op - Op control */
	printf("\thi_ip 0x%x %s\n", CCB.hi_ip,
	    s_ip(CCB.hi_ip));				/* BYTE hi_ip - Input pins */
	printf("\thi_state 0x%x %s\n", CCB.hi_state,
	    s_state(CCB.hi_state));			/* BYTE hi_state - status */
	printf("\thi_prtcl 0x%x %s\n", CCB.hi_prtcl,
	    s_prtcl(CCB.hi_prtcl));			/* BYTE hi_prtcl - Protocol */
	printf("\thi_txon 0x%x\n", CCB.hi_txon);	/* BYTE hi_txon - host copy tx xon stuff */
	printf("\thi_txoff 0x%x\n", CCB.hi_txoff);	/* BYTE hi_txoff - */
	printf("\thi_rxon 0x%x\n", CCB.hi_rxon);	/* BYTE hi_rxon - */
	printf("\thi_rxoff 0x%x\n", CCB.hi_rxoff);	/* BYTE hi_rxoff - */
	printf("\tclose_prev 0x%x\n", CCB.close_prev);	/* BYTE close_prev - Was channel previously closed */
	printf("\thi_break 0x%x %s\n", CCB.hi_break,
	    s_break(CCB.hi_break));			/* BYTE hi_break - host copy break process */
	printf("\tbreak_state 0x%x\n", CCB.break_state); /* BYTE break_state - local copy ditto */
	printf("\thi_mask 0x%x\n", CCB.hi_mask);	/* BYTE hi_mask - Mask for CS7 etc. */
	printf("\tmask_z280 0x%x\n", CCB.mask_z280);	/* BYTE mask_z280 - Z280's copy */
						/* BYTE res[0x60 - 36] - */
						/* BYTE hi_txbuf[SLXOS_BUFFERSIZE] - */
						/* BYTE hi_rxbuf[SLXOS_BUFFERSIZE] - */
						/* BYTE res1[0xA0] - */
}
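/*
 * The s_xstat(), s_cstat(), s_stat(), s_mr1(), s_mr2(), s_clk(), s_op(),
 * s_ip(), s_state(), s_prtcl() and s_break() helpers called above are not
 * part of this excerpt; each takes a register byte and returns a printable
 * description of the bits that are set.  The sketch below only shows the
 * general shape of such a decoder: the flag names and bit positions are
 * hypothetical placeholders, not the actual SLXOS register layout.
 */
#include <string.h>

#define XSTAT_FLAG_A	0x01		/* hypothetical bit 0 */
#define XSTAT_FLAG_B	0x02		/* hypothetical bit 1 */
#define XSTAT_FLAG_C	0x04		/* hypothetical bit 2 */

static const char *
s_xstat_sketch(int x)
{
	static char buf[64];		/* large enough for every flag name */

	buf[0] = '\0';
	if (x & XSTAT_FLAG_A)
		strcat(buf, " flag_a");
	if (x & XSTAT_FLAG_B)
		strcat(buf, " flag_b");
	if (x & XSTAT_FLAG_C)
		strcat(buf, " flag_c");
	return (buf);
}
/* Used the same way as above: printf("\tx_status 0x%x %s\n", v, s_xstat_sketch(v)); */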
#define s_op(aligned, unchecked) \
	op(DPF_OP_SHIFT, aligned, unchecked, 0)

typedef enum {
	/*			    aligned  unchecked  hash */
	EQ			  = eq_op(0, 0, 0),
	EQ_UNCHECKED		  = eq_op(0, 1, 0),
	EQ_HASH			  = eq_op(0, 0, 1),
	EQ_UNCHECKED_HASH	  = eq_op(0, 1, 1),
	EQ_ALIGNED		  = eq_op(1, 0, 0),
	EQ_ALIGNED_UNCHECKED	  = eq_op(1, 1, 0),
	EQ_ALIGNED_HASH		  = eq_op(1, 0, 1),
	EQ_ALIGNED_UNCHECKED_HASH = eq_op(1, 1, 1),

	/*			    aligned  unchecked */
	SHIFT			  = s_op(0, 0),
	SHIFT_UNCHECKED		  = s_op(0, 1),
	SHIFT_ALIGNED		  = s_op(1, 0),
	SHIFT_ALIGNED_UNCHECKED	  = s_op(1, 1),
} state_t;

/* label with the right state. */
void
dpf_compile_atom(Atom a)
{
	struct ir *ir;
	unsigned unchecked, aligned;

	ir = &a->ir;

	/*
	 * If we have shifted on this path or the offset surpasses
	 * the minimal amount of buffer space we are allocated,
	 * emit a check to see if we have exceeded our memory.
	 */
	unchecked = !ir->shiftp && (ir->u.eq.offset <= DPF_MINMSG);
	aligned = ((ir->alignment + ir->u.eq.offset) % 4 == 0);

	a->code[0] = (isshift(ir)) ?
	    s_op(aligned, unchecked) :
	    eq_op(aligned, unchecked, (a->ht != 0));
}
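/*
 * The eq_op() macro and the underlying op() packer are not shown in this
 * excerpt.  The sketch below illustrates one way such an encoding could
 * fold a base opcode and the aligned/unchecked/hash flags into the
 * distinct state_t values above; the opcode numbers and bit positions are
 * hypothetical, not the actual DPF encoding.
 */
enum {					/* hypothetical base opcodes */
	DPF_OP_EQ_SKETCH	= 0x1,
	DPF_OP_SHIFT_SKETCH	= 0x2,
};

/* Assumed layout: opcode in the low nibble, the three flag bits above it. */
#define op_sketch(opcode, aligned, unchecked, hash) \
	((opcode) | ((aligned) << 4) | ((unchecked) << 5) | ((hash) << 6))

#define eq_op_sketch(aligned, unchecked, hash) \
	op_sketch(DPF_OP_EQ_SKETCH, aligned, unchecked, hash)
#define s_op_sketch(aligned, unchecked) \
	op_sketch(DPF_OP_SHIFT_SKETCH, aligned, unchecked, 0)

/*
 * Under this layout every flag combination maps to a unique value, e.g.
 * eq_op_sketch(1, 1, 0) == 0x31 while s_op_sketch(1, 1) == 0x32, so an
 * interpreter can dispatch on a->code[0] alone.
 */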