/*
 * Drain pending commands from CACHE1 and dispatch them to PGRAPH.
 *
 * Pops method/data pairs from the on-chip CACHE1 FIFO one at a time:
 *  - method 0 binds a graphics object (resolved through the RAMHT hash
 *    table) to the entry's subchannel;
 *  - methods >= 0x100 are forwarded to the engine bound to that
 *    subchannel (only ENGINE_GRAPHICS is supported here).
 *
 * NOTE(review): the qemu_mutex_unlock(&d->pfifo.lock) /
 * qemu_mutex_lock(&d->pfifo.lock) pairs below imply the caller holds
 * d->pfifo.lock on entry -- confirm at the call sites.
 */
static void pfifo_run_puller(NV2AState *d)
{
    uint32_t *pull0 = &d->pfifo.regs[NV_PFIFO_CACHE1_PULL0];
    uint32_t *pull1 = &d->pfifo.regs[NV_PFIFO_CACHE1_PULL1];
    uint32_t *engine_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_ENGINE];
    uint32_t *status = &d->pfifo.regs[NV_PFIFO_CACHE1_STATUS];
    uint32_t *get_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_GET];
    uint32_t *put_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_PUT];

    // TODO
    // CacheEntry working_cache[NV2A_CACHE1_SIZE];
    // int working_cache_size = 0;
    // pull everything into our own queue

    // TODO think more about locking

    while (true) {
        /* puller disabled -> nothing to do */
        if (!GET_MASK(*pull0, NV_PFIFO_CACHE1_PULL0_ACCESS)) return;

        /* empty cache1 */
        if (*status & NV_PFIFO_CACHE1_STATUS_LOW_MARK) break;

        uint32_t get = *get_reg;
        uint32_t put = *put_reg;

        /* CACHE1 holds 128 entries; GET is a byte offset in 4-byte steps */
        assert(get < 128*4 && (get % 4) == 0);
        uint32_t method_entry = d->pfifo.regs[NV_PFIFO_CACHE1_METHOD + get*2];
        uint32_t parameter = d->pfifo.regs[NV_PFIFO_CACHE1_DATA + get*2];

        /* advance GET with wrap-around at 128 entries (0x1fc mask) */
        uint32_t new_get = (get+4) & 0x1fc;
        *get_reg = new_get;

        if (new_get == put) {
            // set low mark
            *status |= NV_PFIFO_CACHE1_STATUS_LOW_MARK;
        }
        if (*status & NV_PFIFO_CACHE1_STATUS_HIGH_MARK) {
            // unset high mark
            *status &= ~NV_PFIFO_CACHE1_STATUS_HIGH_MARK;
            // signal pusher
            qemu_cond_signal(&d->pfifo.pusher_cond);
        }

        /*低 bits [12:2] are the method address; see METHOD_ADDRESS mask */
        uint32_t method = method_entry & 0x1FFC;
        uint32_t subchannel =
            GET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_SUBCHANNEL);

        // NV2A_DPRINTF("pull %d 0x%x 0x%x - subch %d\n",
        //              get/4, method_entry, parameter, subchannel);

        if (method == 0) {
            /* method 0: bind the object named by `parameter` to the
             * subchannel via a RAMHT lookup */
            RAMHTEntry entry = ramht_lookup(d, parameter);
            assert(entry.valid);

            // assert(entry.channel_id == state->channel_id);

            assert(entry.engine == ENGINE_GRAPHICS);
            /* the engine is bound to the subchannel */
            assert(subchannel < 8);
            SET_MASK(*engine_reg, 3 << (4*subchannel), entry.engine);
            SET_MASK(*pull1, NV_PFIFO_CACHE1_PULL1_ENGINE, entry.engine);
            // NV2A_DPRINTF("engine_reg1 %d 0x%x\n", subchannel, *engine_reg);

            // TODO: this is f****d
            qemu_mutex_lock(&d->pgraph.lock);
            //make pgraph busy
            /* pfifo.lock is dropped while pgraph runs; pgraph.lock is
             * held across the context switch + method call */
            qemu_mutex_unlock(&d->pfifo.lock);

            pgraph_context_switch(d, entry.channel_id);
            pgraph_wait_fifo_access(d);
            pgraph_method(d, subchannel, 0, entry.instance);

            // make pgraph not busy
            qemu_mutex_unlock(&d->pgraph.lock);

            qemu_mutex_lock(&d->pfifo.lock);
        } else if (method >= 0x100) {
            // method passed to engine

            /* methods that take objects.
             * TODO: Check this range is correct for the nv2a */
            if (method >= 0x180 && method < 0x200) {
                //qemu_mutex_lock_iothread();
                RAMHTEntry entry = ramht_lookup(d, parameter);
                assert(entry.valid);
                // assert(entry.channel_id == state->channel_id);
                /* replace the object handle with its instance address */
                parameter = entry.instance;
                //qemu_mutex_unlock_iothread();
            }

            enum FIFOEngine engine = GET_MASK(*engine_reg, 3 << (4*subchannel));
            // NV2A_DPRINTF("engine_reg2 %d 0x%x\n", subchannel, *engine_reg);
            assert(engine == ENGINE_GRAPHICS);
            SET_MASK(*pull1, NV_PFIFO_CACHE1_PULL1_ENGINE, engine);

            // TODO: this is f****d
            qemu_mutex_lock(&d->pgraph.lock);
            //make pgraph busy
            qemu_mutex_unlock(&d->pfifo.lock);

            pgraph_wait_fifo_access(d);
            pgraph_method(d, subchannel, method, parameter);

            // make pgraph not busy
            qemu_mutex_unlock(&d->pgraph.lock);

            qemu_mutex_lock(&d->pfifo.lock);
        } else {
            /* methods 4..0xfc are not valid engine methods */
            assert(false);
        }
    }
}
/*
 * DMA pusher: fetch 32-bit command words from the channel's pushbuffer
 * (mapped via nv_dma_map) and push decoded method/data pairs into the
 * CACHE1 FIFO for pfifo_run_puller to consume.
 *
 * NOTE(review): this chunk of the file is truncated -- the body below
 * ends inside the `else` branch that handles a new command word (the
 * method_count == 0 decode path); the rest of the function, including
 * the write-back of dma_get_v, is not visible in this view.
 */
static void pfifo_run_pusher(NV2AState *d)
{
    uint32_t *push0 = &d->pfifo.regs[NV_PFIFO_CACHE1_PUSH0];
    uint32_t *push1 = &d->pfifo.regs[NV_PFIFO_CACHE1_PUSH1];
    uint32_t *dma_subroutine = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_SUBROUTINE];
    uint32_t *dma_state = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_STATE];
    uint32_t *dma_push = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_PUSH];
    uint32_t *dma_get = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_GET];
    uint32_t *dma_put = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_PUT];
    uint32_t *dma_dcount = &d->pfifo.regs[NV_PFIFO_CACHE1_DMA_DCOUNT];
    uint32_t *status = &d->pfifo.regs[NV_PFIFO_CACHE1_STATUS];
    uint32_t *get_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_GET];
    uint32_t *put_reg = &d->pfifo.regs[NV_PFIFO_CACHE1_PUT];

    /* pusher disabled */
    if (!GET_MASK(*push0, NV_PFIFO_CACHE1_PUSH0_ACCESS)) return;

    /* dma engine disabled */
    if (!GET_MASK(*dma_push, NV_PFIFO_CACHE1_DMA_PUSH_ACCESS)) return;

    /* suspended */
    if (GET_MASK(*dma_push, NV_PFIFO_CACHE1_DMA_PUSH_STATUS)) return;

    // TODO: should we become busy here??
    // NV_PFIFO_CACHE1_DMA_PUSH_STATE _BUSY

    unsigned int channel_id = GET_MASK(*push1, NV_PFIFO_CACHE1_PUSH1_CHID);

    /* Channel running DMA mode */
    uint32_t channel_modes = d->pfifo.regs[NV_PFIFO_MODE];
    assert(channel_modes & (1 << channel_id));
    assert(GET_MASK(*push1, NV_PFIFO_CACHE1_PUSH1_MODE)
            == NV_PFIFO_CACHE1_PUSH1_MODE_DMA);

    /* We're running so there should be no pending errors... */
    assert(GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_ERROR)
            == NV_PFIFO_CACHE1_DMA_STATE_ERROR_NONE);

    /* instance address register stores bits [31:4]; shift to a byte addr */
    hwaddr dma_instance =
        GET_MASK(d->pfifo.regs[NV_PFIFO_CACHE1_DMA_INSTANCE],
                 NV_PFIFO_CACHE1_DMA_INSTANCE_ADDRESS) << 4;

    hwaddr dma_len;
    uint8_t *dma = nv_dma_map(d, dma_instance, &dma_len);

    while (true) {
        uint32_t dma_get_v = *dma_get;
        uint32_t dma_put_v = *dma_put;
        /* pushbuffer drained */
        if (dma_get_v == dma_put_v) break;
        if (dma_get_v >= dma_len) {
            /* fetch past the mapped DMA object -> protection error */
            assert(false);
            SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_ERROR,
                     NV_PFIFO_CACHE1_DMA_STATE_ERROR_PROTECTION);
            break;
        }
        uint32_t word = ldl_le_p((uint32_t*)(dma + dma_get_v));
        dma_get_v += 4;

        /* decoder state carried in NV_PFIFO_CACHE1_DMA_STATE */
        uint32_t method_type =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_TYPE);
        uint32_t method_subchannel =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_SUBCHANNEL);
        uint32_t method =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD) << 2;
        uint32_t method_count =
            GET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_COUNT);
        /* NOTE(review): subroutine_state is unused in the visible part of
         * this function; presumably consumed in the truncated remainder */
        uint32_t subroutine_state =
            GET_MASK(*dma_subroutine, NV_PFIFO_CACHE1_DMA_SUBROUTINE_STATE);

        if (method_count) {
            /* full */
            if (*status & NV_PFIFO_CACHE1_STATUS_HIGH_MARK) return;

            /* data word of methods command */
            d->pfifo.regs[NV_PFIFO_CACHE1_DMA_DATA_SHADOW] = word;

            uint32_t put = *put_reg;
            uint32_t get = *get_reg;

            assert((method & 3) == 0);
            uint32_t method_entry = 0;
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_ADDRESS, method >> 2);
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_TYPE, method_type);
            SET_MASK(method_entry, NV_PFIFO_CACHE1_METHOD_SUBCHANNEL,
                     method_subchannel);

            // NV2A_DPRINTF("push %d 0x%x 0x%x - subch %d\n",
            //              put/4, method_entry, word, method_subchannel);

            assert(put < 128*4 && (put%4) == 0);
            d->pfifo.regs[NV_PFIFO_CACHE1_METHOD + put*2] = method_entry;
            d->pfifo.regs[NV_PFIFO_CACHE1_DATA + put*2] = word;

            /* advance PUT with wrap-around at 128 entries */
            uint32_t new_put = (put+4) & 0x1fc;
            *put_reg = new_put;
            if (new_put == get) {
                // set high mark
                *status |= NV_PFIFO_CACHE1_STATUS_HIGH_MARK;
            }
            if (*status & NV_PFIFO_CACHE1_STATUS_LOW_MARK) {
                // unset low mark
                *status &= ~NV_PFIFO_CACHE1_STATUS_LOW_MARK;
                // signal puller
                qemu_cond_signal(&d->pfifo.puller_cond);
            }

            /* INC methods auto-advance the method address per data word */
            if (method_type == NV_PFIFO_CACHE1_DMA_STATE_METHOD_TYPE_INC) {
                SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD,
                         (method + 4) >> 2);
            }
            SET_MASK(*dma_state, NV_PFIFO_CACHE1_DMA_STATE_METHOD_COUNT,
                     method_count - 1);
            (*dma_dcount)++;
        } else {
            /* NOTE(review): SOURCE is truncated here -- the command-word
             * decode path (method_count == 0), the loop close, and the
             * function close are missing from this view. */
/*
 * Translate a TC flower classifier offload request into the driver's
 * bnxt_tc_flow representation.
 *
 * Copies each dissector key/mask pair that the filter uses (L2 addrs,
 * VLAN, IPv4/IPv6 addrs, L4 ports, ICMP, and the tunnel/"ENC" variants)
 * into @flow, setting the corresponding BNXT_TC_FLOW_FLAGS_* bit for
 * every populated field, then hands the action list to
 * bnxt_tc_parse_actions().
 *
 * Returns 0 (via bnxt_tc_parse_actions) on success, -EOPNOTSUPP when the
 * filter lacks CONTROL/BASIC keys or matches on encapsulated IPv6.
 */
static int bnxt_tc_parse_flow(struct bnxt *bp,
                              struct tc_cls_flower_offload *tc_flow_cmd,
                              struct bnxt_tc_flow *flow)
{
    struct flow_dissector *dissector = tc_flow_cmd->dissector;
    /* NOTE(review): addr_type is assigned below (from both CONTROL and
     * ENC_CONTROL) but never read in this function as shown -- confirm
     * whether it is dead or used by code outside this view. */
    u16 addr_type = 0;

    /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
    if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
        (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
        netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
                    dissector->used_keys);
        return -EOPNOTSUPP;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
        struct flow_dissector_key_control *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

        addr_type = key->addr_type;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
        struct flow_dissector_key_basic *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
        struct flow_dissector_key_basic *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

        flow->l2_key.ether_type = key->n_proto;
        flow->l2_mask.ether_type = mask->n_proto;

        /* ip_proto is only meaningful for IP ethertypes */
        if (key->n_proto == htons(ETH_P_IP) ||
            key->n_proto == htons(ETH_P_IPV6)) {
            flow->l4_key.ip_proto = key->ip_proto;
            flow->l4_mask.ip_proto = mask->ip_proto;
        }
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
        struct flow_dissector_key_eth_addrs *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
        struct flow_dissector_key_eth_addrs *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

        flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
        ether_addr_copy(flow->l2_key.dmac, key->dst);
        ether_addr_copy(flow->l2_mask.dmac, mask->dst);
        ether_addr_copy(flow->l2_key.smac, key->src);
        ether_addr_copy(flow->l2_mask.smac, mask->src);
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
        struct flow_dissector_key_vlan *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
        struct flow_dissector_key_vlan *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

        flow->l2_key.inner_vlan_tci =
            cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
        flow->l2_mask.inner_vlan_tci =
            cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
        /* only 802.1Q single-tagged matching is expressed here */
        flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
        flow->l2_mask.inner_vlan_tpid = htons(0xffff);
        flow->l2_key.num_vlans = 1;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
        struct flow_dissector_key_ipv4_addrs *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
        struct flow_dissector_key_ipv4_addrs *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

        flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
        flow->l3_key.ipv4.daddr.s_addr = key->dst;
        flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
        flow->l3_key.ipv4.saddr.s_addr = key->src;
        flow->l3_mask.ipv4.saddr.s_addr = mask->src;
    } else if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
        struct flow_dissector_key_ipv6_addrs *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
        struct flow_dissector_key_ipv6_addrs *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

        flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
        flow->l3_key.ipv6.daddr = key->dst;
        flow->l3_mask.ipv6.daddr = mask->dst;
        flow->l3_key.ipv6.saddr = key->src;
        flow->l3_mask.ipv6.saddr = mask->src;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
        struct flow_dissector_key_ports *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
        struct flow_dissector_key_ports *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

        flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
        flow->l4_key.ports.dport = key->dst;
        flow->l4_mask.ports.dport = mask->dst;
        flow->l4_key.ports.sport = key->src;
        flow->l4_mask.ports.sport = mask->src;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
        struct flow_dissector_key_icmp *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
        struct flow_dissector_key_icmp *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

        flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
        flow->l4_key.icmp.type = key->type;
        flow->l4_key.icmp.code = key->code;
        flow->l4_mask.icmp.type = mask->type;
        flow->l4_mask.icmp.code = mask->code;
    }

    /* tunnel (encapsulated) match keys */
    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
        struct flow_dissector_key_control *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

        addr_type = key->addr_type;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
        struct flow_dissector_key_ipv4_addrs *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
        struct flow_dissector_key_ipv4_addrs *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
        flow->tun_key.u.ipv4.dst = key->dst;
        flow->tun_mask.u.ipv4.dst = mask->dst;
        flow->tun_key.u.ipv4.src = key->src;
        flow->tun_mask.u.ipv4.src = mask->src;
    } else if (dissector_uses_key(dissector,
                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
        /* IPv6 tunnel endpoints are not offloadable */
        return -EOPNOTSUPP;
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
        struct flow_dissector_key_keyid *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
        struct flow_dissector_key_keyid *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
        flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
        flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
    }

    if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
        struct flow_dissector_key_ports *key =
            GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
        struct flow_dissector_key_ports *mask =
            GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
        flow->tun_key.tp_dst = key->dst;
        flow->tun_mask.tp_dst = mask->dst;
        flow->tun_key.tp_src = key->src;
        flow->tun_mask.tp_src = mask->src;
    }

    return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
/*
 * Read one 3DS-style chunk from `stream` and recursively process its
 * sub-chunks.
 *
 * `data` is an optional destination for the decoded payload; its type
 * depends on the chunk tag (e.g. a Color_24 for the color chunks).
 * Returns 1 on success, 0 on read/parse failure (after MtlError()).
 *
 * NOTE(review): this chunk of the file is truncated -- the body below
 * ends right after the MAT_AMBIENT/MAT_DIFFUSE/MAT_SPECULAR case block;
 * the remaining cases, the switch/function close, and the final return
 * are not visible in this view.
 */
int get_mtlchunk(FILE *stream, void *data)
{
    /* Grab a chunk from the file and process all its subsets */
    Chunk_hdr chunk;
    Chunk_hdr nxt;
    uint thistag, nexttag;
    /* NOTE(review): `string` and `pshort` are unused in the visible part
     * of this function; presumably used in the truncated remainder */
    char *string;
    short *pshort;

    /* read the 6-byte chunk header (tag + size) */
    RDERR(&chunk, 6);
    thistag = chunk.tag;
#ifdef DBGLDMLI
    if (dbgldmli) printf(" get_mtlchunk: tag=%X, size=%d \n", thistag, chunk.size);
#endif
    /* Update chunk size to account for header */
    chunk.size -= 6L;

    /* Find chunk type and go process it */
    switch (thistag) {
    case MLIBMAGIC:
        /* material library: iterate over contained MAT_ENTRY chunks */
        while (chunk.size) {
            if (get_next_chunk(stream, &nxt) == 0) return(0);
            nexttag = nxt.tag;
            switch (nexttag) {
            case MAT_ENTRY:
                loadmtl = &inmtl;
                /* Zero out data structure first */
                init_mtl_struct(loadmtl);
                if (get_mtlchunk(stream, NULL) == 0) return(0);
                if (put_lib_mtl(loadmtl) == 0) return(0);
                break;
            default:
                if (skip_chunk(stream) == 0) { MtlError(); return(0); }
                break;
            }
            chunk.size -= nxt.size;
        }
        break;
    case MAT_ENTRY:
    case MATMAGIC:
        /* a single material: dispatch each sub-chunk into `loadmtl` */
        while (chunk.size) {
            if (get_next_chunk(stream, &nxt) == 0) return(0);
            nexttag = nxt.tag;
            switch (nexttag) {
            case MAT_NAME:
                strlimit = 17;
                if (get_mtlchunk(stream, loadmtl->name) == 0) return(0);
#ifdef DBGLDMLI
                if (dbgldmli) printf(" **** Loading material : %s \n", loadmtl->name);
#endif
                if (just_name) /* If all we need is the name, return */
                    return(1);
                break;
            case MAT_AMBIENT:
                if (get_mtlchunk(stream, &loadmtl->amb) == 0) return(0);
                break;
            case MAT_DIFFUSE:
                if (get_mtlchunk(stream, &loadmtl->diff) == 0) return(0);
                break;
            case MAT_SPECULAR:
                if (get_mtlchunk(stream, &loadmtl->spec) == 0) return(0);
                break;
            case MAT_ACUBIC:
                {
                    /* automatic cubic reflection params -- only valid when a
                     * reflection map slot exists */
                    Mapping *m = loadmtl->map[Nrefl];
                    if (m == NULL) goto skip_mtl_chunk;
                    if (get_mtlchunk(stream, &m->map.p.ref.acb) == 0) return(0);
                }
                break;
            /* SXP (procedural texture) data blobs -- recurse with no
             * destination; note MAT_SXP_REFL_DATA is absent here, matching
             * its absence from the top-level cases below */
            case MAT_SXP_TEXT_DATA:
            case MAT_SXP_TEXT2_DATA:
            case MAT_SXP_OPAC_DATA:
            case MAT_SXP_BUMP_DATA:
            case MAT_SXP_SPEC_DATA:
            case MAT_SXP_SELFI_DATA:
            case MAT_SXP_SHIN_DATA:
            case MAT_SXP_TEXT_MASKDATA:
            case MAT_SXP_TEXT2_MASKDATA:
            case MAT_SXP_OPAC_MASKDATA:
            case MAT_SXP_BUMP_MASKDATA:
            case MAT_SXP_SPEC_MASKDATA:
            case MAT_SXP_SELFI_MASKDATA:
            case MAT_SXP_SHIN_MASKDATA:
            case MAT_SXP_REFL_MASKDATA:
                if (get_mtlchunk(stream, NULL) == 0) return(0);
                break;
            /* texture/mask map sub-chunks */
            case MAT_TEXMAP:
            case MAT_TEX2MAP:
            case MAT_OPACMAP:
            case MAT_BUMPMAP:
            case MAT_SPECMAP:
            case MAT_SHINMAP:
            case MAT_SELFIMAP:
            case MAT_REFLMAP:
            case MAT_TEXMASK:
            case MAT_TEX2MASK:
            case MAT_OPACMASK:
            case MAT_BUMPMASK:
            case MAT_SPECMASK:
            case MAT_SHINMASK:
            case MAT_SELFIMASK:
            case MAT_REFLMASK:
                if (get_mtlchunk(stream, NULL) == 0) return(0);
                break;
            /* scalar / flag sub-chunks */
            case MAT_SHININESS:
            case MAT_SHIN2PCT:
            case MAT_TRANSPARENCY:
            case MAT_XPFALL:
            case MAT_REFBLUR:
            case MAT_SELF_ILPCT:
            case MAT_SHADING:
            case MAT_TWO_SIDE:
            case MAT_SUPERSMP:
            case MAT_SELF_ILLUM:
            case MAT_DECAL:
            case MAT_ADDITIVE:
            case MAT_WIRE:
            case MAT_FACEMAP:
            case MAT_XPFALLIN:
            case MAT_PHONGSOFT:
            case MAT_WIREABS:
            case MAT_USE_XPFALL:
            case MAT_USE_REFBLUR:
            case MAT_WIRESIZE:
                if (get_mtlchunk(stream, NULL) == 0) return(0);
                break;
            case APP_DATA:
                if (get_mtlchunk(stream, &loadmtl->appdata) == 0) return(0);
                break;
            default:
skip_mtl_chunk:
                if (skip_chunk(stream) == 0) { MtlError(); return(0); }
                break;
            }
            chunk.size -= nxt.size;
        }
#ifdef DBGLDMLI
        if (dbgldmli) printf(" finished loading mtl %s, flags = %X\n", loadmtl->name, loadmtl->flags);
#endif
        /* convert old data formats to new */
        if (loadmtl->shading == REND_WIRE) {
            /* old wireframe shading mode -> flat + wire/two-side flags */
            loadmtl->shading = REND_FLAT;
            loadmtl->flags |= MF_WIRE;
            loadmtl->flags |= MF_TWOSIDE;
            loadmtl->shininess = 0;
            loadmtl->shin2pct = 0;
            loadmtl->transparency = 0;
        }
        if (loadmtl->xpfall < 0.0) {
            /* negative falloff encoded "falloff in" in old files */
            loadmtl->flags |= MF_XPFALLIN;
            loadmtl->xpfall = -loadmtl->xpfall;
        }
        if (loadmtl->flags & MF_DECAL) {
            set_mtl_decal(loadmtl);
            loadmtl->flags &= ~MF_DECAL;
        }
        if (loadmtl->shin2pct == 255) {
            /* 255 is the old "unset" sentinel: derive strength from shininess */
            float shin = (((float)(loadmtl->shininess))/100.0f);
            float atten = (float)sin(1.5707*shin);
            loadmtl->shin2pct = (int)((atten)*100.0f+0.5f);
        }
        break;
    /* leaf SXP data chunks (second arg 0 = data, 1 = mask data) */
    case MAT_SXP_TEXT_DATA: GET_SXP(Ntex,0); break;
    case MAT_SXP_TEXT2_DATA: GET_SXP(Ntex2,0); break;
    case MAT_SXP_OPAC_DATA: GET_SXP(Nopac,0); break;
    case MAT_SXP_BUMP_DATA: GET_SXP(Nbump,0); break;
    case MAT_SXP_SPEC_DATA: GET_SXP(Nspec,0); break;
    case MAT_SXP_SELFI_DATA: GET_SXP(Nselfi,0); break;
    case MAT_SXP_SHIN_DATA: GET_SXP(Nshin,0); break;
    case MAT_SXP_TEXT_MASKDATA: GET_SXP(Ntex,1); break;
    case MAT_SXP_TEXT2_MASKDATA: GET_SXP(Ntex2,1); break;
    case MAT_SXP_OPAC_MASKDATA: GET_SXP(Nopac,1); break;
    case MAT_SXP_BUMP_MASKDATA: GET_SXP(Nbump,1); break;
    case MAT_SXP_SPEC_MASKDATA: GET_SXP(Nspec,1); break;
    case MAT_SXP_SELFI_MASKDATA: GET_SXP(Nselfi,1); break;
    case MAT_SXP_SHIN_MASKDATA: GET_SXP(Nshin,1); break;
    case MAT_SXP_REFL_MASKDATA: GET_SXP(Nrefl,1); break;
    /* leaf map chunks */
    case MAT_TEXMAP: GET_MAP(Ntex); break;
    case MAT_TEX2MAP: GET_MAP(Ntex2); break;
    case MAT_OPACMAP: GET_MAP(Nopac); break;
    case MAT_BUMPMAP: GET_MAP(Nbump); break;
    case MAT_SPECMAP: GET_MAP(Nspec); break;
    case MAT_SHINMAP: GET_MAP(Nshin); break;
    case MAT_SELFIMAP: GET_MAP(Nselfi); break;
    case MAT_REFLMAP: GET_MAP(Nrefl); break;
    /* leaf mask chunks */
    case MAT_TEXMASK: GET_MASK(Ntex); break;
    case MAT_TEX2MASK: GET_MASK(Ntex2); break;
    case MAT_OPACMASK: GET_MASK(Nopac); break;
    case MAT_BUMPMASK: GET_MASK(Nbump); break;
    case MAT_SPECMASK: GET_MASK(Nspec); break;
    case MAT_SHINMASK: GET_MASK(Nshin); break;
    case MAT_SELFIMASK: GET_MASK(Nselfi); break;
    case MAT_REFLMASK: GET_MASK(Nrefl); break;
    case MAT_AMBIENT:
    case MAT_DIFFUSE:
    case MAT_SPECULAR:
        {
            /* color chunk: prefer the linear color (LIN_COLOR_24) if
             * present, otherwise de-gamma the gamma-space color when
             * gamma correction is enabled */
            int got_lin, got_gam;
            got_lin = got_gam = 0;
            while (chunk.size) {
                if (get_next_chunk(stream, &nxt) == 0) return(0);
                nexttag = nxt.tag;
                switch (nexttag) {
                case COLOR_F:
                case COLOR_24:
                    got_gam = 1;
                    if (get_mtlchunk(stream, NULL) == 0) return(0);
                    break;
                case LIN_COLOR_24:
                    got_lin = 1;
                    if (get_mtlchunk(stream, NULL) == 0) return(0);
                    break;
                default:
                    if (skip_chunk(stream) == 0) { MtlError(); return(0); }
                    break;
                }
                chunk.size -= nxt.size;
            }
            if (got_lin) {
                /* linear color wins; LC24 was filled by the recursive call */
                memcpy((char *)data, (char *)&LC24, sizeof(Color_24));
            } else {
                if (!got_gam) { MtlError(); return(0); }
                if (gammaMgr.enable) {
                    Color_24 gc;
                    gc.r = gammaMgr.file_in_degamtab[C24.r]>>8;
                    gc.g = gammaMgr.file_in_degamtab[C24.g]>>8;
                    gc.b = gammaMgr.file_in_degamtab[C24.b]>>8;
                    memcpy((char *)data, (char *)&gc, sizeof(Color_24));
                } else {
                    memcpy((char *)data, (char *)&C24, sizeof(Color_24));
                }
            }
        }
        /* NOTE(review): SOURCE is truncated here -- the trailing `break`,
         * the remaining cases, and the switch/function close are missing
         * from this view. */
/*
 * Check whether the stone of `color` at position `c` completes five (or
 * more) aligned same-colored stones on a 19x19 board.
 *
 * Scans the 9-cell window centered on `c` along the four alignment
 * directions (horizontal, vertical, both diagonals), counting consecutive
 * intersections whose mask matches GET_MASK(HAS_ONE, color). Off-board
 * cells are skipped; since they can only occur at the edges of the
 * window, they never split a run.
 *
 * Returns true as soon as a run of 5 is found, false otherwise.
 *
 * Fix: the previous debug statement
 *     printf("count = " + countNumberOfSameColoredPowns);
 * performed pointer arithmetic on the string literal instead of
 * formatting the integer (garbage output, and out-of-bounds/undefined
 * behavior once the count exceeds the literal's length). It has been
 * removed.
 */
bool Referee::Victory(coords const & c, enum eColor color)
{
    /* direction vectors: horizontal, vertical, main diagonal, anti-diagonal */
    static const int dirs[4][2] = { {1, 0}, {0, 1}, {1, 1}, {1, -1} };

    for (int d = 0; d < 4; ++d) {
        int run = 0;
        /* walk the window from c - 4*dir to c + 4*dir */
        for (int step = -4; step <= 4; ++step) {
            int x = c.x + step * dirs[d][0];
            int y = c.y + step * dirs[d][1];
            if (x < 0 || x > 18 || y < 0 || y > 18)
                continue;   /* off-board: edge of window only, skip */
            if (this->_b->getIntersection(x, y).checkMask(GET_MASK(HAS_ONE, color))) {
                if (++run == 5)
                    return (true);
            } else {
                run = 0;    /* run broken by an empty/opponent cell */
            }
        }
    }
    //MSG WINNER + QUIT GAME OR RESTARD MSG handled by caller
    return (false);
}