static bool apic_valid_ipi_mode(u32 lo_val)
{
	switch (lo_val & APIC_ICR_DLVR_MASK) {
	case APIC_ICR_DLVR_INIT:
	case APIC_ICR_DLVR_FIXED:
	case APIC_ICR_DLVR_LOWPRI:
	case APIC_ICR_DLVR_NMI:
	case APIC_ICR_DLVR_SIPI:
		break;
	default:
		panic_printk("FATAL: Unsupported APIC delivery mode, "
			     "ICR.lo=%x\n", lo_val);
		return false;
	}

	switch (lo_val & APIC_ICR_SH_MASK) {
	case APIC_ICR_SH_NONE:
	case APIC_ICR_SH_SELF:
		break;
	default:
		panic_printk("FATAL: Unsupported shorthand, ICR.lo=%x\n",
			     lo_val);
		return false;
	}
	return true;
}
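The two switch statements above whitelist delivery modes and shorthands independently. A minimal sketch of how they compose, for illustration only (the APIC_ICR_* constants are the ones used by apic_valid_ipi_mode; the bit placement of the hypothetical rejected value assumes the conventional ICR layout with the shorthand field in bits 19:18):

static void apic_icr_check_example(void)
{
	u32 ok = APIC_ICR_DLVR_FIXED | APIC_ICR_SH_NONE;	/* fixed IPI, explicit destination */
	u32 bad = APIC_ICR_DLVR_FIXED | (3 << 18);		/* assumed all-including shorthand */

	apic_valid_ipi_mode(ok);	/* passes both checks, returns true */
	apic_valid_ipi_mode(bad);	/* rejected shorthand: panics and returns false */
}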
bool vcpu_handle_io_access(struct registers *guest_regs,
			   struct vcpu_io_intercept *io)
{
	struct per_cpu *cpu_data = this_cpu_data();
	int result = 0;

	/* string and REP-prefixed instructions are not supported */
	if (io->rep_or_str)
		goto invalid_access;

	result = x86_pci_config_handler(guest_regs, cpu_data->cell, io->port,
					io->in, io->size);
	if (result == 0)
		result = i8042_access_handler(guest_regs, io->port, io->in,
					      io->size);

	if (result == 1) {
		vcpu_skip_emulated_instruction(io->inst_len);
		return true;
	}

invalid_access:
	panic_printk("FATAL: Invalid PIO %s, port: %x size: %d\n",
		     io->in ? "read" : "write", io->port, io->size);
	panic_printk("PCI address port: %x\n",
		     cpu_data->cell->pci_addr_port_val);
	return false;
}
int i8042_access_handler(u16 port, bool dir_in, unsigned int size)
{
	union registers *guest_regs = &this_cpu_data()->guest_regs;
	const struct jailhouse_cell_desc *config = this_cell()->config;
	const u8 *pio_bitmap = jailhouse_cell_pio_bitmap(config);
	u8 val;

	if (port == I8042_CMD_REG &&
	    config->pio_bitmap_size >= (I8042_CMD_REG + 7) / 8 &&
	    !(pio_bitmap[I8042_CMD_REG / 8] & (1 << (I8042_CMD_REG % 8)))) {
		if (size != 1)
			goto invalid_access;
		if (dir_in) {
			guest_regs->rax &= ~BYTE_MASK(1);
			guest_regs->rax |= inb(I8042_CMD_REG);
		} else {
			val = (u8)guest_regs->rax;
			if (val == I8042_CMD_WRITE_CTRL_PORT ||
			    (val & I8042_CMD_PULSE_CTRL_PORT) ==
			    I8042_CMD_PULSE_CTRL_PORT)
				goto invalid_access;
			outb(val, I8042_CMD_REG);
		}
		return 1;
	}
	return 0;

invalid_access:
	/* oversized reads land here as well, so report a generic access */
	panic_printk("FATAL: Invalid access to i8042 controller port\n");
	return -1;
}
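The guard above only emulates the access when port 0x64 is trapped for the cell, i.e. its bit is cleared in the PIO bitmap. A hypothetical helper (not part of the source) spelling out that convention:

/* A cleared bitmap bit means the cell may not touch the port directly,
 * so the hypervisor has to moderate the access. */
static inline bool pio_port_is_trapped(const u8 *pio_bitmap, u32 bitmap_size,
				       u16 port)
{
	return bitmap_size > port / 8 &&
	       !(pio_bitmap[port / 8] & (1 << (port % 8)));
}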
/**
 * Handler for accesses to PCI config space.
 * @param guest_regs	Guest register set.
 * @param cell		Issuing cell.
 * @param port		I/O port number of this access.
 * @param dir_in	True for input, false for output.
 * @param size		Size of access in bytes (1, 2 or 4).
 *
 * @return 1 if handled successfully, 0 if unhandled, -1 on access error.
 */
int x86_pci_config_handler(struct registers *guest_regs, struct cell *cell,
			   u16 port, bool dir_in, unsigned int size)
{
	struct pci_device *device;
	u32 addr_port_val;
	u16 bdf, address;
	int result = 0;

	if (port == PCI_REG_ADDR_PORT) {
		/* only 4-byte accesses are valid */
		if (size != 4)
			goto invalid_access;

		if (dir_in)
			set_rax_reg(guest_regs, cell->pci_addr_port_val, size);
		else
			cell->pci_addr_port_val =
				get_rax_reg(guest_regs, size);
		result = 1;
	} else if (port >= PCI_REG_DATA_PORT &&
		   port < (PCI_REG_DATA_PORT + 4)) {
		/* overflowing accesses are invalid */
		if (port + size > PCI_REG_DATA_PORT + 4)
			goto invalid_access;

		/*
		 * Decode which register in PCI config space is accessed. It is
		 * essential to store the address port value locally so that we
		 * are not affected by concurrent manipulations by other CPUs
		 * of this cell.
		 */
		addr_port_val = cell->pci_addr_port_val;

		bdf = addr_port_val >> PCI_ADDR_BDF_SHIFT;
		device = pci_get_assigned_device(cell, bdf);

		address = (addr_port_val & PCI_ADDR_REGNUM_MASK) +
			port - PCI_REG_DATA_PORT;

		if (dir_in)
			result = data_port_in_handler(guest_regs, device,
						      address, size);
		else
			result = data_port_out_handler(guest_regs, device,
						       address, size);
		if (result < 0)
			goto invalid_access;
	}

	return result;

invalid_access:
	panic_printk("FATAL: Invalid PCI config %s, port: %x, size %d, "
		     "address port: %x\n", dir_in ? "read" : "write", port,
		     size, cell->pci_addr_port_val);
	return -1;
}
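A worked example of the address-port decode above, assuming the standard 0xcf8/0xcfc config mechanism layout (PCI_ADDR_BDF_SHIFT == 8, PCI_ADDR_REGNUM_MASK == 0xfc, PCI_REG_DATA_PORT == 0xcfc); values are illustrative:

static void pci_addr_decode_example(void)
{
	u32 addr_port_val = 0x80001810;	/* enable bit | bus 0, dev 3, fn 0, reg 0x10 */
	u16 port = 0xcfe;		/* 2-byte access through the data port */

	u16 bdf = addr_port_val >> 8;		/* 0x0018: bus 0, device 3, function 0 */
	u16 address = (addr_port_val & 0xfc) +	/* dword-aligned register 0x10 (BAR0) */
		      (port - 0xcfc);		/* plus byte offset 2 -> address 0x12 */

	(void)bdf;
	(void)address;
}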
static void __nat25_db_print(_adapter *priv)
{
	_irqL irqL;

	_enter_critical_bh(&priv->br_ext_lock, &irqL);

#ifdef BR_EXT_DEBUG
	static int counter = 0;
	int i, j;
	struct nat25_network_db_entry *db;

	counter++;
	if ((counter % 16) != 0)
		goto out;	/* was a bare return, which leaked br_ext_lock */

	for (i = 0, j = 0; i < NAT25_HASH_SIZE; i++) {
		db = priv->nethash[i];

		while (db != NULL) {
			panic_printk("NAT25: DB(%d) H(%02d) C(%d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
				     "%02x%02x%02x%02x%02x%02x\n",
				     j, i, atomic_read(&db->use_count),
				     db->macAddr[0], db->macAddr[1], db->macAddr[2],
				     db->macAddr[3], db->macAddr[4], db->macAddr[5],
				     db->networkAddr[0], db->networkAddr[1],
				     db->networkAddr[2], db->networkAddr[3],
				     db->networkAddr[4], db->networkAddr[5],
				     db->networkAddr[6], db->networkAddr[7],
				     db->networkAddr[8], db->networkAddr[9],
				     db->networkAddr[10], db->networkAddr[11],
				     db->networkAddr[12], db->networkAddr[13],
				     db->networkAddr[14], db->networkAddr[15],
				     db->networkAddr[16]);
			j++;
			db = db->next_hash;
		}
	}

out:
#endif
	_exit_critical_bh(&priv->br_ext_lock, &irqL);
}
void PHY_Set_SecCCATH_by_RXANT_8814A(IN HAL_PADAPTER Adapter, IN u4Byte ulAntennaRx)
{
	HAL_PADAPTER priv = Adapter;

	//1 Setting CCA TH 2nd CCA parameter by Rx Antenna
	if (priv->pshare->CurrentChannelBW == HT_CHANNEL_WIDTH_80) {
		switch (ulAntennaRx) {
		case ANTENNA_A:		// xT1R
		case ANTENNA_B:
		case ANTENNA_C:
		case ANTENNA_D:
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x00000001, 0x1);	// Enable 2ndCCA
			PHY_SetBBReg(priv, REG_BB_AGC_TABLE_AC, 0x00FF0000, 0x89);	// 0x82C[23:20] = 8, PWDB_TH_QB, 0x82C[19:16] = 9, PWDB_TH_HB
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x0FFF0000, 0x887);	// 838[27:24]=8, RF80_secondary40, 838[23:20]=8, RF80_secondary20, 838[19:16]=7, RF80_primary
			PHY_SetBBReg(priv, REG_BB_L1_Weight_Jaguar, 0x0000F000, 0x7);	// 840[15:12]=7, L1_square_Pk_weight_80M
			break;
		case ANTENNA_AB:	// xT2R
		case ANTENNA_AC:
		//case ANTENNA_AD:
		case ANTENNA_BC:
		case ANTENNA_BD:
		case ANTENNA_CD:
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x00000001, 0x1);	// Enable 2ndCCA
			PHY_SetBBReg(priv, REG_BB_AGC_TABLE_AC, 0x00FF0000, 0x78);	// 0x82C[23:20] = 7, PWDB_TH_QB, 0x82C[19:16] = 8, PWDB_TH_HB
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x0FFF0000, 0x444);	// 838[27:24]=4, RF80_secondary40, 838[23:20]=4, RF80_secondary20, 838[19:16]=4, RF80_primary
			PHY_SetBBReg(priv, REG_BB_L1_Weight_Jaguar, 0x0000F000, 0x6);	// 840[15:12]=6, L1_square_Pk_weight_80M
			break;
		case ANTENNA_ABC:	// xT3R
		//case ANTENNA_ABD:
		//case ANTENNA_ACD:
		case ANTENNA_BCD:
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x00000001, 0x1);	// Enable 2ndCCA
			PHY_SetBBReg(priv, REG_BB_AGC_TABLE_AC, 0x00FF0000, 0x98);	// 0x82C[23:20] = 9, PWDB_TH_QB, 0x82C[19:16] = 8, PWDB_TH_HB
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x0FFF0000, 0x666);	// 838[27:24]=6, RF80_secondary40, 838[23:20]=6, RF80_secondary20, 838[19:16]=6, RF80_primary
			PHY_SetBBReg(priv, REG_BB_L1_Weight_Jaguar, 0x0000F000, 0x6);	// 840[15:12]=6, L1_square_Pk_weight_80M
			break;
		case ANTENNA_ABCD:	// xT4R
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x00000001, 0x1);	// Enable 2ndCCA
			PHY_SetBBReg(priv, REG_BB_AGC_TABLE_AC, 0x00FF0000, 0x98);	// 0x82C[23:20] = 9, PWDB_TH_QB, 0x82C[19:16] = 8, PWDB_TH_HB
			PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x0FFF0000, 0x666);	// 838[27:24]=6, RF80_secondary40, 838[23:20]=6, RF80_secondary20, 838[19:16]=6, RF80_primary
			PHY_SetBBReg(priv, REG_BB_L1_Weight_Jaguar, 0x0000F000, 0x7);	// 840[15:12]=7, L1_square_Pk_weight_80M
			break;
		default:
			panic_printk("Unknown Rx antenna.\n");
			break;
		}
	} else {
		PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x00000001, 0x0);	// Disable 2ndCCA
		PHY_SetBBReg(priv, REG_BB_AGC_TABLE_AC, 0x00FF0000, 0x43);	// 0x82C[23:20] = 4, PWDB_TH_QB, 0x82C[19:16] = 3, PWDB_TH_HB
		PHY_SetBBReg(priv, REG_BB_CCAONSEC_AC, 0x0FFF0000, 0x7aa);	// 838[27:24]=7, RF80_secondary40, 838[23:20]=a, RF80_secondary20, 838[19:16]=a, RF80_primary
		PHY_SetBBReg(priv, REG_BB_L1_Weight_Jaguar, 0x0000F000, 0x7);	// 840[15:12]=7, L1_square_Pk_weight_80M
	}
}
static enum mmio_result mmio_handle_subpage(void *arg, struct mmio_access *mmio)
{
	const struct jailhouse_memory *mem = arg;
	u64 perm = mmio->is_write ? JAILHOUSE_MEM_WRITE : JAILHOUSE_MEM_READ;
	unsigned long page_virt = TEMPORARY_MAPPING_BASE +
		this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES;
	unsigned long page_phys =
		((unsigned long)mem->phys_start + mmio->address) & PAGE_MASK;
	unsigned long virt_base;
	int err;

	/* check read/write access permissions */
	if (!(mem->flags & perm))
		goto invalid_access;

	/* width bit according to access size needs to be set */
	if (!((mmio->size << JAILHOUSE_MEM_IO_WIDTH_SHIFT) & mem->flags))
		goto invalid_access;

	/* naturally unaligned access needs to be allowed explicitly */
	if (mmio->address & (mmio->size - 1) &&
	    !(mem->flags & JAILHOUSE_MEM_IO_UNALIGNED))
		goto invalid_access;

	err = paging_create(&hv_paging_structs, page_phys, PAGE_SIZE,
			    page_virt, PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
			    PAGING_NON_COHERENT);
	if (err)
		goto invalid_access;

	/*
	 * This virt_base gives the following effective virtual address in
	 * mmio_perform_access:
	 *
	 *     page_virt + (mem->phys_start & ~PAGE_MASK) +
	 *         (mmio->address & ~PAGE_MASK)
	 *
	 * Reason: mmio_perform_access does addr = base + mmio->address.
	 */
	virt_base = page_virt + (mem->phys_start & ~PAGE_MASK) -
		(mmio->address & PAGE_MASK);

	mmio_perform_access((void *)virt_base, mmio);
	return MMIO_HANDLED;

invalid_access:
	panic_printk("FATAL: Invalid MMIO %s, address: %x, size: %x\n",
		     mmio->is_write ? "write" : "read",
		     mem->phys_start + mmio->address, mmio->size);
	return MMIO_ERROR;
}
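A numeric check of the virt_base arithmetic, for illustration only (assumes PAGE_MASK == ~0xfffUL):

/*
 * With mem->phys_start = 0x10000800 and mmio->address = 0x10:
 *
 *   page_phys = (0x10000800 + 0x10) & PAGE_MASK = 0x10000000
 *   virt_base = page_virt + 0x800 - 0x0
 *
 * mmio_perform_access then dereferences virt_base + mmio->address =
 * page_virt + 0x810, i.e. offset 0x810 into the freshly mapped page,
 * which corresponds exactly to physical address 0x10000810.
 */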
bool vcpu_handle_pt_violation(struct registers *guest_regs,
			      struct vcpu_pf_intercept *pf)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct guest_paging_structures pg_structs;
	struct vcpu_execution_state x_state;
	struct mmio_access access;
	int result = 0;
	u32 val;

	vcpu_vendor_get_execution_state(&x_state);

	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto invalid_access;

	access = mmio_parse(x_state.rip, &pg_structs, pf->is_write);
	if (!access.inst_len || access.size != 4)
		goto invalid_access;

	if (pf->is_write)
		val = ((unsigned long *)guest_regs)[access.reg];

	result = ioapic_access_handler(cpu_data->cell, pf->is_write,
				       pf->phys_addr, &val);
	if (result == 0)
		result = pci_mmio_access_handler(cpu_data->cell, pf->is_write,
						 pf->phys_addr, &val);
	if (result == 0)
		result = iommu_mmio_access_handler(pf->is_write,
						   pf->phys_addr, &val);

	if (result == 1) {
		if (!pf->is_write)
			((unsigned long *)guest_regs)[access.reg] = val;
		vcpu_skip_emulated_instruction(access.inst_len);
		return true;
	}

invalid_access:
	/* report only unhandled access failures */
	if (result == 0)
		panic_printk("FATAL: Invalid MMIO/RAM %s, addr: %p\n",
			     pf->is_write ? "write" : "read", pf->phys_addr);
	return false;
}
static int br_ioctl_device(struct net_bridge *br, unsigned int cmd,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2)
{
	if (br == NULL)
		return -EINVAL;

	switch (cmd) {
	case BRCTL_ADD_IF:
	case BRCTL_DEL_IF:
	{
		struct net_device *dev;
		int ret;

		dev = dev_get_by_index(arg0);
		if (dev == NULL)
			return -EINVAL;

		if (cmd == BRCTL_ADD_IF)
			ret = br_add_if(br, dev);
		else
			ret = br_del_if(br, dev);

		dev_put(dev);
		return ret;
	}

	case BRCTL_GET_BRIDGE_INFO:
	{
		struct __bridge_info b;

		memset(&b, 0, sizeof(struct __bridge_info));
		memcpy(&b.designated_root, &br->designated_root, 8);
		memcpy(&b.bridge_id, &br->bridge_id, 8);
		b.root_path_cost = br->root_path_cost;
		b.max_age = br->max_age;
		b.hello_time = br->hello_time;
		b.forward_delay = br->forward_delay;
		b.bridge_max_age = br->bridge_max_age;
		b.bridge_hello_time = br->bridge_hello_time;
		b.bridge_forward_delay = br->bridge_forward_delay;
		b.topology_change = br->topology_change;
		b.topology_change_detected = br->topology_change_detected;
		b.root_port = br->root_port;
		b.stp_enabled = br->stp_enabled;
		b.ageing_time = br->ageing_time;
		b.gc_interval = br->gc_interval;
		b.hello_timer_value = br_timer_get_residue(&br->hello_timer);
		b.tcn_timer_value = br_timer_get_residue(&br->tcn_timer);
		b.topology_change_timer_value =
			br_timer_get_residue(&br->topology_change_timer);
		b.gc_timer_value = br_timer_get_residue(&br->gc_timer);

		if (copy_to_user((void *)arg0, &b, sizeof(b)))
			return -EFAULT;

		return 0;
	}

	case BRCTL_GET_PORT_LIST:
	{
		int i;
		int indices[256];

		for (i = 0; i < 256; i++)
			indices[i] = 0;

		br_get_port_ifindices(br, indices);

		if (copy_to_user((void *)arg0, indices, 256 * sizeof(int)))
			return -EFAULT;

		return 0;
	}

	case BRCTL_SET_BRIDGE_FORWARD_DELAY:
		br->bridge_forward_delay = arg0;
		if (br_is_root_bridge(br))
			br->forward_delay = arg0;
		return 0;

	case BRCTL_SET_BRIDGE_HELLO_TIME:
		br->bridge_hello_time = arg0;
		if (br_is_root_bridge(br))
			br->hello_time = arg0;
		return 0;

	case BRCTL_SET_BRIDGE_MAX_AGE:
		br->bridge_max_age = arg0;
		if (br_is_root_bridge(br))
			br->max_age = arg0;
		return 0;

	case BRCTL_SET_AGEING_TIME:
		br->ageing_time = arg0;
		return 0;

	case BRCTL_SET_GC_INTERVAL:
		br->gc_interval = arg0;
		return 0;

	case BRCTL_GET_PORT_INFO:
	{
		struct __port_info p;
		struct net_bridge_port *pt;

		if ((pt = br_get_port(br, arg1)) == NULL)
			return -EINVAL;

		memset(&p, 0, sizeof(struct __port_info));
		memcpy(&p.designated_root, &pt->designated_root, 8);
		memcpy(&p.designated_bridge, &pt->designated_bridge, 8);
		p.port_id = pt->port_id;
		p.designated_port = pt->designated_port;
		p.path_cost = pt->path_cost;
		p.designated_cost = pt->designated_cost;
		p.state = pt->state;
		p.top_change_ack = pt->topology_change_ack;
		p.config_pending = pt->config_pending;
		p.message_age_timer_value =
			br_timer_get_residue(&pt->message_age_timer);
		p.forward_delay_timer_value =
			br_timer_get_residue(&pt->forward_delay_timer);
		p.hold_timer_value = br_timer_get_residue(&pt->hold_timer);

		if (copy_to_user((void *)arg0, &p, sizeof(p)))
			return -EFAULT;

		return 0;
	}

	case BRCTL_SET_BRIDGE_STP_STATE:
		br->stp_enabled = arg0 ? 1 : 0;
		return 0;

	case BRCTL_SET_BRIDGE_PRIORITY:
		br_stp_set_bridge_priority(br, arg0);
		return 0;

	case BRCTL_SET_PORT_PRIORITY:
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		br_stp_set_port_priority(p, arg1);
		return 0;
	}

	case BRCTL_SET_PATH_COST:
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		br_stp_set_path_cost(p, arg1);
		return 0;
	}

	case BRCTL_GET_FDB_ENTRIES:
#ifdef CONFIG_RTK_GUEST_ZONE
		return br_fdb_get_entries(br, (void *)arg0, arg1, arg2, 0);
#else
		return br_fdb_get_entries(br, (void *)arg0, arg1, arg2);
#endif

#ifdef MULTICAST_FILTER
	case 101:
		printk(KERN_INFO "%s: clear port list of multicast filter\n",
		       br->dev.name);
		br->fltr_portlist_num = 0;
		return 0;

	case 102:
	{
		int i;

		if (br->fltr_portlist_num == MLCST_FLTR_ENTRY) {
			printk(KERN_INFO "%s: set port num of multicast filter, entries full!\n",
			       br->dev.name);
			return 0;
		}
		for (i = 0; i < br->fltr_portlist_num; i++)
			if (br->fltr_portlist[i] == (unsigned short)arg0)
				return 0;

		printk(KERN_INFO "%s: set port num [%d] of multicast filter\n",
		       br->dev.name, (unsigned short)arg0);
		br->fltr_portlist[br->fltr_portlist_num] = (unsigned short)arg0;
		br->fltr_portlist_num++;
		return 0;
	}
#endif

#ifdef MULTICAST_BWCTRL
	case 103:
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		if (arg1 == 0) {
			p->bandwidth = 0;
			printk(KERN_INFO "%s: port %i(%s) multicast bandwidth all\n",
			       p->br->dev.name, p->port_no, p->dev->name);
		} else {
			p->bandwidth = arg1 * 1000 / 8;
			printk(KERN_INFO "%s: port %i(%s) multicast bandwidth %dkbps\n",
			       p->br->dev.name, p->port_no, p->dev->name,
			       (unsigned int)arg1);
		}
		return 0;
	}
#endif

#ifdef RTL_BRIDGE_MAC_CLONE
	case 104: // MAC Clone enable/disable
	{
		struct net_bridge_port *p;
		unsigned char nullmac[] = {0, 0, 0, 0, 0, 0};

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		if ((p->macCloneTargetPort = br_get_port(br, arg1)) == NULL)
			return -EINVAL;

		p->enable_mac_clone = 1;
		p->mac_clone_completed = 0;

		if (clone_pair.port != p->macCloneTargetPort) {
			TRACE("clone_pair.port [%x] != p->macCloneTargetPort [%x], don't clone\n",
			      (unsigned int)clone_pair.port,
			      (unsigned int)p->macCloneTargetPort);
			clone_pair.port = p->macCloneTargetPort;
			TRACE("clone_pair.port = %x\n", (unsigned int)clone_pair.port);
			memset(clone_pair.mac.addr, 0, ETH_ALEN);
		} else {
			if (!memcmp(clone_pair.mac.addr, nullmac, ETH_ALEN)) {
				TRACE("clone_pair.mac.addr == nullmac, don't clone\n");
			} else {
				TRACE("Clone MAC from previous one\n");
				br_mac_clone(p->macCloneTargetPort, clone_pair.mac.addr);
			}
		}
		TRACE("device %s, Enable MAC Clone to device %s\n",
		      p->dev->name, p->macCloneTargetPort->dev->name);
		return 0;
	}
#endif

#ifdef CONFIG_RTK_GUEST_ZONE
	case 105: // set zone
	{
		struct net_bridge_port *p;

		if ((p = br_get_port(br, arg0)) == NULL)
			return -EINVAL;
		p->is_guest_zone = arg1;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set device=%s is_guest_zone=%d\n",
			     p->dev->name, p->is_guest_zone);
#endif
		return 0;
	}

	case 106: // set zone isolation
		br->is_zone_isolated = arg0;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set zone isolation=%d\n", br->is_zone_isolated);
#endif
		return 0;

	case 107: // set guest isolation
		br->is_guest_isolated = arg0;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set guest isolation=%d\n", br->is_guest_isolated);
#endif
		return 0;

	case 108: // set lock mac list
	{
		unsigned char mac[6];
		int i;

		if (copy_from_user(mac, (unsigned long *)arg0, 6))
			return -EFAULT;
#ifdef DEBUG_GUEST_ZONE
		panic_printk("set lock client list=%02x:%02x:%02x:%02x:%02x:%02x\n",
			     mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#endif
		if (!memcmp(mac, "\x0\x0\x0\x0\x0\x0", 6)) { // reset list
#ifdef DEBUG_GUEST_ZONE
			panic_printk("reset lock list!\n");
#endif
			br->lock_client_num = 0;
			return 0;
		}

		for (i = 0; i < br->lock_client_num; i++) {
			if (!memcmp(mac, br->lock_client_list[i], 6)) {
#ifdef DEBUG_GUEST_ZONE
				panic_printk("duplicated lock entry!\n");
#endif
				return 0;
			}
		}

		if (br->lock_client_num >= MAX_LOCK_CLIENT) {
#ifdef DEBUG_GUEST_ZONE
			panic_printk("Add failed, lock list table full!\n");
#endif
			return 0;
		}

		memcpy(br->lock_client_list[br->lock_client_num], mac, 6);
		br->lock_client_num++;
		return 0;
	}

	case 109: // show guest info
	{
		int i;

		panic_printk("\n");
		panic_printk(" zone isolation: %d\n", br->is_zone_isolated);
		panic_printk(" guest isolation: %d\n", br->is_guest_isolated);

		i = 1;
		while (1) {
			struct net_bridge_port *p;

			if ((p = br_get_port(br, i++)) == NULL)
				break;
			panic_printk(" %s: %s\n", p->dev->name,
				     (p->is_guest_zone ? "guest" : "host"));
		}

		panic_printk(" locked client no: %d\n", br->lock_client_num);
		for (i = 0; i < br->lock_client_num; i++) {
			unsigned char *mac;

			mac = br->lock_client_list[i];
			panic_printk(" mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
				     mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		}
		panic_printk("\n");
		return 0;
	}

	case 110:
		return br_fdb_get_entries(br, (void *)arg0, arg1, arg2, 1);
#endif // CONFIG_RTK_GUEST_ZONE
	}

	return -EOPNOTSUPP;
}
void nat25_db_expire(_adapter *priv)
{
	int i;
	_irqL irqL;

	_enter_critical_bh(&priv->br_ext_lock, &irqL);

	//if (!priv->ethBrExtInfo.nat25_disable)
	{
		for (i = 0; i < NAT25_HASH_SIZE; i++) {
			struct nat25_network_db_entry *f, *g;

			for (f = priv->nethash[i]; f != NULL; f = g) {
				g = f->next_hash;

				if (__nat25_has_expired(priv, f)) {
					if (atomic_dec_and_test(&f->use_count)) {
#ifdef BR_EXT_DEBUG
#ifdef CL_IPV6_PASS
						panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
							     "%02x%02x%02x%02x%02x%02x\n",
							     i,
							     f->macAddr[0], f->macAddr[1], f->macAddr[2],
							     f->macAddr[3], f->macAddr[4], f->macAddr[5],
							     f->networkAddr[0], f->networkAddr[1],
							     f->networkAddr[2], f->networkAddr[3],
							     f->networkAddr[4], f->networkAddr[5],
							     f->networkAddr[6], f->networkAddr[7],
							     f->networkAddr[8], f->networkAddr[9],
							     f->networkAddr[10], f->networkAddr[11],
							     f->networkAddr[12], f->networkAddr[13],
							     f->networkAddr[14], f->networkAddr[15],
							     f->networkAddr[16]);
#else
						panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
							     i,
							     f->macAddr[0], f->macAddr[1], f->macAddr[2],
							     f->macAddr[3], f->macAddr[4], f->macAddr[5],
							     f->networkAddr[0], f->networkAddr[1],
							     f->networkAddr[2], f->networkAddr[3],
							     f->networkAddr[4], f->networkAddr[5],
							     f->networkAddr[6], f->networkAddr[7],
							     f->networkAddr[8], f->networkAddr[9],
							     f->networkAddr[10]);
#endif
#endif
						if (priv->scdb_entry == f) {
							set_zero_mac_addr(priv->scdb_mac);
							RTW_WN32(priv->scdb_ip, 0);
							priv->scdb_entry = NULL;
						}
						__network_hash_unlink(f);
						rtw_mfree((u8 *)f, sizeof(struct nat25_network_db_entry));
					}
				}
			}
		}
	}

	_exit_critical_bh(&priv->br_ext_lock, &irqL);
}
int nat25_handle_frame(_adapter *priv, struct sk_buff *skb)
{
#ifdef BR_EXT_DEBUG
	if ((!priv->ethBrExtInfo.nat25_disable) && (!(skb->data[0] & 1))) {
		panic_printk("NAT25: Input Frame: DA=%02x%02x%02x%02x%02x%02x SA=%02x%02x%02x%02x%02x%02x\n",
			     skb->data[0], skb->data[1], skb->data[2],
			     skb->data[3], skb->data[4], skb->data[5],
			     skb->data[6], skb->data[7], skb->data[8],
			     skb->data[9], skb->data[10], skb->data[11]);
	}
#endif

	if (!(skb->data[0] & 1)) {
		int is_vlan_tag = 0, i, retval = 0;
		unsigned short vlan_hdr = 0;

		if (RTW_RN16A(skb->data + ETH_ALEN * 2) == __constant_htons(ETH_P_8021Q)) {
			is_vlan_tag = 1;
			vlan_hdr = RTW_RN16A(skb->data + ETH_ALEN * 2 + 2);
			for (i = 0; i < 6; i++)
				RTW_WN16A(skb->data + ETH_ALEN * 2 + 2 - i * 2,
					  RTW_RN16A(skb->data + ETH_ALEN * 2 - 2 - i * 2));
			skb_pull(skb, 4);
		}

		if (!priv->ethBrExtInfo.nat25_disable) {
			_irqL irqL;

			_enter_critical_bh(&priv->br_ext_lock, &irqL);
			/*
			 * This function looks up the destination network address in
			 * the NAT2.5 database. A return value of -1 means that the
			 * corresponding network protocol is NOT supported.
			 */
			if (!priv->ethBrExtInfo.nat25sc_disable &&
			    RTW_RN16A(skb->data + ETH_ALEN * 2) == __constant_htons(ETH_P_IP) &&
			    RTW_RN32(priv->scdb_ip) == RTW_RN32(skb->data + ETH_HLEN + 16)) {
				copy_mac_addr(skb->data, priv->scdb_mac);
				_exit_critical_bh(&priv->br_ext_lock, &irqL);
			} else {
				_exit_critical_bh(&priv->br_ext_lock, &irqL);
				retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
			}
		} else {
			if ((RTW_RN16A(skb->data + ETH_ALEN * 2) == __constant_htons(ETH_P_IP) &&
			     RTW_RN32(priv->br_ip) == RTW_RN32(skb->data + ETH_HLEN + 16)) ||
			    (RTW_RN16A(skb->data + ETH_ALEN * 2) == __constant_htons(ETH_P_ARP) &&
			     RTW_RN32(priv->br_ip) == RTW_RN32(skb->data + ETH_HLEN + 24))) {
				// for traffic to upper TCP/IP
				retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
			}
		}

		if (is_vlan_tag) {
			skb_push(skb, 4);
			for (i = 0; i < 6; i++)
				RTW_WN16A(skb->data + i * 2, RTW_RN16A(skb->data + 4 + i * 2));
			RTW_WN16A(skb->data + ETH_ALEN * 2, __constant_htons(ETH_P_8021Q));
			RTW_WN16A(skb->data + ETH_ALEN * 2 + 2, vlan_hdr);
		}

		if (retval == -1) {
			//DEBUG_ERR("NAT25: Lookup fail!\n");
			return -1;
		}
	}

	return 0;
}
/*************************************************************************
 * FUNCTION
 *	swNic_send
 *
 * DESCRIPTION
 *	This function writes one packet to the tx descriptors and hands it
 *	to the switch core for transmission.
 *
 * INPUTS
 *	skb, output buffer, packet length, nicTx descriptor info
 *
 * OUTPUTS
 *	Descriptor index on success, -1 if the tx ring is full
 *************************************************************************/
__MIPS16 __IRAM_FWD inline int32 _swNic_send(void *skb, void *output, uint32 len, rtl_nicTx_info *nicTx)
{
	struct rtl_pktHdr *pPkthdr;
	int next_index, ret;

	if ((currTxPkthdrDescIndex[nicTx->txIdx] + 1) == txPkthdrRingCnt[nicTx->txIdx])
		next_index = 0;
	else
		next_index = currTxPkthdrDescIndex[nicTx->txIdx] + 1;

	if (next_index == txPktDoneDescIndex[nicTx->txIdx]) {
		/* TX ring full */
		return -1;
	}

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	pPkthdr = (struct rtl_pktHdr *) ((int32) txPkthdrRing_base[nicTx->txIdx] +
		(sizeof(struct rtl_pktHdr) * currTxPkthdrDescIndex[nicTx->txIdx]));
#else
	/* Fetch packet header from Tx ring */
	pPkthdr = (struct rtl_pktHdr *) ((int32) txPkthdrRing[nicTx->txIdx][currTxPkthdrDescIndex[nicTx->txIdx]] &
		~(DESC_OWNED_BIT | DESC_WRAP));
#endif

	/* Pad small packets and add CRC */
	if (len < 60)
		len = 64;
	else
		len += 4;

	pPkthdr->ph_mbuf->m_len = len;
	pPkthdr->ph_mbuf->m_extsize = len;
	pPkthdr->ph_mbuf->skb = skb;
	pPkthdr->ph_len = len;
	pPkthdr->ph_vlanId = nicTx->vid;

#if defined(CONFIG_8198_PORT5_GMII) || defined(CONFIG_8198_PORT5_RGMII) || defined(CONFIG_RTL_8198C_8367RB)
	pPkthdr->ph_portlist = nicTx->portlist & 0x3f;
#else
	pPkthdr->ph_portlist = nicTx->portlist & 0x1f;
#endif
	pPkthdr->ph_srcExtPortNum = nicTx->srcExtPort;
	pPkthdr->ph_flags = nicTx->flags;

#if defined(CONFIG_RTL_HW_QOS_SUPPORT) || defined(CONFIG_RTK_VOIP_QOS)
	pPkthdr->ph_txPriority = nicTx->priority;
#endif

#ifdef CONFIG_RTK_VLAN_WAN_TAG_SUPPORT
	if (*((unsigned short *)((unsigned char *)output + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q))
		pPkthdr->ph_txCVlanTagAutoAdd = nicTx->tagport;
	else
		pPkthdr->ph_txCVlanTagAutoAdd = 0;
#endif

#if defined(CONFIG_RTL_VLAN_8021Q) || defined(CONFIG_SWCONFIG)
	if (*((unsigned short *)((unsigned char *)output + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q))
		pPkthdr->ph_txCVlanTagAutoAdd = (0x3f) & rtk_get_vlan_tagmask(pPkthdr->ph_vlanId);
	else
		pPkthdr->ph_txCVlanTagAutoAdd = 0;
#if 0
	panic_printk("%s %d pPkthdr->ph_txCVlanTagAutoAdd=0x%x pPkthdr->ph_portlist=0x%x pPkthdr->ph_vlanId=%d\n",
		     __FUNCTION__, __LINE__, pPkthdr->ph_txCVlanTagAutoAdd,
		     pPkthdr->ph_portlist, pPkthdr->ph_vlanId);
#endif
#elif defined(CONFIG_RTL_HW_VLAN_SUPPORT)
	if (*((unsigned short *)((unsigned char *)output + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q))
		pPkthdr->ph_txCVlanTagAutoAdd = auto_set_tag_portmask;
	else
		pPkthdr->ph_txCVlanTagAutoAdd = 0;
#endif

	/* Set cluster pointer to buffer */
	pPkthdr->ph_mbuf->m_data = (output);
	pPkthdr->ph_mbuf->m_extbuf = (output);

#if defined(CONFIG_RTL_819XD) || defined(CONFIG_RTL_8196E) || defined(CONFIG_RTL_8198C)
	pPkthdr->ph_ptpPkt = 0;
#endif

#ifdef _PKTHDR_CACHEABLE
#if defined(CONFIG_RTL_8198C)
	_dma_cache_wback((unsigned long)pPkthdr, sizeof(struct rtl_pktHdr));
	_dma_cache_wback((unsigned long)(pPkthdr->ph_mbuf), sizeof(struct rtl_mBuf));
#else
	_dma_cache_wback_inv((unsigned long)pPkthdr, sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)(pPkthdr->ph_mbuf), sizeof(struct rtl_mBuf));
#endif
#endif

	ret = currTxPkthdrDescIndex[nicTx->txIdx];
	currTxPkthdrDescIndex[nicTx->txIdx] = next_index;

	/* Give descriptor to switch core */
	txPkthdrRing[nicTx->txIdx][ret] |= DESC_SWCORE_OWNED;

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && !defined(CONFIG_RTL_8198C)
	{
		uint32 pkthdr2 = (uint32)txPkthdrRing[nicTx->txIdx][ret];

		if ((pkthdr2 & DESC_OWNED_BIT) == 0)
#ifndef CONFIG_OPENWRT_SDK
			panic_printk("_swNic_send: idx= %d, read back pkthdr= 0x%x.\n", ret, pkthdr2);
#else
			printk("_swNic_send: idx= %d, read back pkthdr= 0x%x.\n", ret, pkthdr2);
#endif
	}
#endif

#if 0
	memDump((void *)output, 64, "TX");
	printk("index %d address 0x%p, 0x%x 0x%p.\n", ret,
	       &txPkthdrRing[nicTx->txIdx][ret],
	       (*(volatile uint32 *)&txPkthdrRing[nicTx->txIdx][ret]), pPkthdr);
	printk("Flags 0x%x proto 0x%x portlist 0x%x vid %d extPort %d srcExtPort %d len %d.\n",
	       pPkthdr->ph_flags, pPkthdr->ph_proto, pPkthdr->ph_portlist,
	       pPkthdr->ph_vlanId, pPkthdr->ph_extPortList,
	       pPkthdr->ph_srcExtPortNum, pPkthdr->ph_len);
#endif

	/* Set TXFD bit to start send */
	REG32(CPUICR) |= TXFD;

	return ret;
}
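The ring-index discipline used by _swNic_send, written out as hypothetical helpers (they do not exist in the driver): one descriptor is always left unused so that a full ring can be distinguished from an empty one.

static inline int tx_ring_next(int cur, int cnt)
{
	/* wrap the producer index at the end of the ring */
	return (cur + 1 == cnt) ? 0 : cur + 1;
}

static inline int tx_ring_full(int producer, int consumer, int cnt)
{
	/* full when advancing would collide with txPktDoneDescIndex */
	return tx_ring_next(producer, cnt) == consumer;
}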
void Scan_BB_PSD(IN PDM_ODM_T pDM_Odm, int *PSD_report_right, int *PSD_report_left,
		 int len, int initial_gain)
{
	struct rtl8192cd_priv *priv = pDM_Odm->priv;
	pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
	u1Byte ST_TH_origin;
	u1Byte idx[20] = {//96,99,102,106,109,112,115,118,122,125,
			  224, 227, 230, 234, 237, 240, 243, 246, 250, 253,
			  0, 3, 6, 10, 13, 16, 19, 22, 26, 29};
	int tone_idx, channel_org, channel, i;

	// set DFS ST_TH to max value
	ST_TH_origin = RTL_R8(0x91c);
	RTL_W8(0x91c, 0x4e);

	// Turn off CCK
	ODM_SetBBReg(pDM_Odm, 0x808, BIT28, 0);		// 808[28]

	// Turn off TX
	// Pause TX Queue
	if (!priv->pmib->dot11DFSEntry.disable_tx)
		ODM_Write1Byte(pDM_Odm, 0x522, 0xFF);	// REG_TXPAUSE moved to 0x522

	// Turn off CCA
	if (GET_CHIP_VER(priv) == VERSION_8814A) {
		ODM_SetBBReg(pDM_Odm, 0x838, BIT1, 0x1);	// set 838[1] to 1
	} else {
		ODM_SetBBReg(pDM_Odm, 0x838, BIT3, 0x1);	// set 838[3] to 1
	}

	// PHYTXON while loop
	PHY_SetBBReg(priv, 0x8fc, 0xfff, 0);
	i = 0;
	while (ODM_GetBBReg(pDM_Odm, 0xfa0, BIT18)) {
		i++;
		if (i > 1000000) {
			panic_printk("Wait in %s() more than %d times!\n",
				     __FUNCTION__, i);
			break;
		}
	}

	// backup IGI_origin, set IGI = 0x3e;
	pDM_DigTable->bPSDInProgress = TRUE;
	odm_PauseDIG(pDM_Odm, PHYDM_PAUSE, PHYDM_PAUSE_LEVEL_7, initial_gain);

	// Turn off 3-wire
	ODM_SetBBReg(pDM_Odm, 0xC00, BIT1|BIT0, 0x0);	// write 0 to c00[1:0]

	// pts value = 128, 256, 512, 1024
	ODM_SetBBReg(pDM_Odm, 0x910, BIT14|BIT15, 0x1);	// set 910[15:14] to 1: use 256 points
	ODM_SetBBReg(pDM_Odm, 0x910, BIT12|BIT13, 0x1);	// set 910[13:12] to 1: average 8 times

	// scan in-band PSD
	channel_org = ODM_GetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, 0x3FF);

	if (priv->pshare->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {	// was "if(priv, ...)", a stray comma expression
		priv->pshare->No_RF_Write = 0;
		SwBWMode(priv, HT_CHANNEL_WIDTH_20, 0);
		priv->pshare->No_RF_Write = 1;
	}

	if (priv->pshare->rf_ft_var.dfs_scan_inband) {
		int PSD_report_inband[20];

		for (tone_idx = 0; tone_idx < len; tone_idx++)
			PSD_report_inband[tone_idx] = GetPSDData_8812(pDM_Odm, idx[tone_idx], initial_gain);

		panic_printk("PSD inband: ");
		for (i = 0; i < len; i++)
			panic_printk("%d ", PSD_report_inband[i]);
		panic_printk("\n");
	}

	// scan right (higher) neighbor channel
	if (priv->pshare->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
		channel = channel_org + 4;
	else if (priv->pshare->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40)
		channel = channel_org + 6;
	else
		channel = channel_org + 10;

	delay_us(300);	// for idle 20M, it will emit signal in right 20M channel

	priv->pshare->No_RF_Write = 0;
	ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, 0x3FF, channel);
	priv->pshare->No_RF_Write = 1;

	for (tone_idx = 0; tone_idx < len; tone_idx++)
		PSD_report_right[tone_idx] = GetPSDData_8812(pDM_Odm, idx[tone_idx], initial_gain);

	// scan left (lower) neighbor channel
	if (priv->pshare->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
		channel = channel_org - 4;
	else if (priv->pshare->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40)
		channel = channel_org - 6;
	else
		channel = channel_org - 10;

	priv->pshare->No_RF_Write = 0;
	ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, 0x3FF, channel);
	priv->pshare->No_RF_Write = 1;

	for (tone_idx = 0; tone_idx < len; tone_idx++)
		PSD_report_left[tone_idx] = GetPSDData_8812(pDM_Odm, idx[tone_idx], initial_gain);

	// restore original center frequency
	if (priv->pshare->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {	// same stray comma fixed here
		priv->pshare->No_RF_Write = 0;
		SwBWMode(priv, priv->pshare->CurrentChannelBW, priv->pshare->offset_2nd_chan);
		priv->pshare->No_RF_Write = 1;
	}

	priv->pshare->No_RF_Write = 0;
	ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, 0x3FF, channel_org);
	priv->pshare->No_RF_Write = 1;

	// Turn on 3-wire
	ODM_SetBBReg(pDM_Odm, 0xc00, BIT1|BIT0, 0x3);	// write 3 to c00[1:0]

	// Restore Current Settings
	// Resume DIG
	pDM_DigTable->bPSDInProgress = FALSE;
	odm_PauseDIG(pDM_Odm, PHYDM_RESUME, PHYDM_PAUSE_LEVEL_7, NONE);

	// Turn on CCA
	if (GET_CHIP_VER(priv) == VERSION_8814A) {
		ODM_SetBBReg(pDM_Odm, 0x838, BIT1, 0);	// set 838[1] to 0
	} else {
		ODM_SetBBReg(pDM_Odm, 0x838, BIT3, 0);	// set 838[3] to 0
	}

	// Turn on TX
	// Resume TX Queue
	if (!priv->pmib->dot11DFSEntry.disable_tx)
		ODM_Write1Byte(pDM_Odm, 0x522, 0x00);	// REG_TXPAUSE moved to 0x522

	// CCK on
	if (priv->pmib->dot11RFEntry.phyBandSelect == PHY_BAND_2G)
		ODM_SetBBReg(pDM_Odm, 0x808, BIT28, 1);	// 808[28]

	// Resume DFS ST_TH
	RTL_W8(0x91c, ST_TH_origin);
}
struct mmio_access mmio_parse(unsigned long pc,
			      const struct guest_paging_structures *pg_structs,
			      bool is_write)
{
	struct mmio_access access = { .inst_len = 0 };
	union opcode op[3] = { };
	bool has_rex_r = false;
	bool does_write;
	u8 *page = NULL;

restart:
	page = map_code_page(pg_structs, pc, page);
	if (!page)
		goto error_nopage;

	op[0].raw = page[pc & PAGE_OFFS_MASK];
	if (op[0].rex.code == X86_REX_CODE) {
		/* REX.W is simply over-read since it only affects the
		 * memory address in our supported modes which we get from the
		 * virtualization support. */
		if (op[0].rex.r)
			has_rex_r = true;
		if (op[0].rex.x)
			goto error_unsupported;

		pc++;
		access.inst_len++;
		goto restart;
	}
	switch (op[0].raw) {
	case X86_OP_MOV_TO_MEM:
		access.inst_len += 2;
		access.size = 4;
		does_write = true;
		break;
	case X86_OP_MOV_FROM_MEM:
		access.inst_len += 2;
		access.size = 4;
		does_write = false;
		break;
	default:
		goto error_unsupported;
	}

	pc++;
	page = map_code_page(pg_structs, pc, page);
	if (!page)
		goto error_nopage;

	op[1].raw = page[pc & PAGE_OFFS_MASK];
	switch (op[1].modrm.mod) {
	case 0:
		if (op[1].modrm.rm == 5) /* 32-bit displacement */
			goto error_unsupported;
		else if (op[1].modrm.rm != 4) /* no SIB */
			break;

		access.inst_len++;
		pc++;
		page = map_code_page(pg_structs, pc, page);
		if (!page)
			goto error_nopage;

		op[2].raw = page[pc & PAGE_OFFS_MASK];
		if (op[2].sib.base == 5)
			access.inst_len += 4;
		break;
	case 1:
	case 2:
		if (op[1].modrm.rm == 4) /* SIB */
			goto error_unsupported;
		access.inst_len += op[1].modrm.mod == 1 ? 1 : 4;
		break;
	default:
		goto error_unsupported;
	}

	if (has_rex_r)
		access.reg = 7 - op[1].modrm.reg;
	else if (op[1].modrm.reg == 4)
		goto error_unsupported;
	else
		access.reg = 15 - op[1].modrm.reg;

	if (does_write != is_write)
		goto error_inconsistent;

	return access;

error_nopage:
	panic_printk("FATAL: unable to map MMIO instruction page\n");
	goto error;

error_unsupported:
	panic_printk("FATAL: unsupported instruction (0x%02x 0x%02x 0x%02x)\n",
		     op[0].raw, op[1].raw, op[2].raw);
	goto error;

error_inconsistent:
	panic_printk("FATAL: inconsistent access, expected %s instruction\n",
		     is_write ? "write" : "read");
error:
	access.inst_len = 0;
	return access;
}
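One example of an encoding this parser accepts, as a walkthrough (illustration only):

/*
 *   89 02	mov %eax,(%rdx)
 *
 * Opcode 0x89 is X86_OP_MOV_TO_MEM; the ModRM byte 0x02 decodes to
 * mod=00, reg=000 (EAX), rm=010 (RDX). mod 0 with rm != 4/5 needs no
 * SIB byte or displacement, so inst_len = 2, size = 4, does_write =
 * true, and access.reg = 15 - 0 = 15, which indexes RAX in the guest
 * register array (stored in push order, r15 first, rax last).
 */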
int rtw_android_priv_cmd2(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rtl8192cd_priv *priv = GET_DEV_PRIV(dev);	// was GET_DEV_PRIV(priv): priv used before initialization
	// char *command2 = NULL;
	int cmd_num2;
	int idx;
	int skip;
	int ret1 = 0;
	struct android_wifi_priv_cmd priv_cmd_s;
	u8 tmpbuf[360];
	int cmdtype;

	memset(&priv_cmd_s, 0, sizeof(struct android_wifi_priv_cmd));

	if (!ifr->ifr_data) {
		NDEBUG("fail!\n");
		return -1;
	}
	if (copy_from_user(&priv_cmd_s, ifr->ifr_data, sizeof(struct android_wifi_priv_cmd))) {
		NDEBUG("fail\n");
		return -1;
	}

	NDEBUG2("buf=[%s]\n", priv_cmd_s.buf);

	if (priv_cmd_s.total_len < 360) {
		if (copy_from_user(tmpbuf, priv_cmd_s.buf, priv_cmd_s.total_len)) {
			NDEBUG("fail\n");
			return -1;
		}
#if 0
		for (idx = 0; idx < priv_cmd_s.total_len; idx++) {
			if ((idx + 1) % 16 == 0)
				panic_printk("\n");
			panic_printk("[%02X]", tmpbuf[idx]);
		}
#endif
	} else {
		NDEBUG("IE len exceeds buffer size (360), check!\n");
		return -1;
	}

#if 1
	cmd_num2 = rtw_android_cmdstr_to_num(tmpbuf);

	switch (cmd_num2) {
	case ANDROID_WIFI_CMD_SET_AP_WPS_P2P_IE:
	{
		skip = strlen(android_wifi_cmd_str[ANDROID_WIFI_CMD_SET_AP_WPS_P2P_IE]) + 3;
		cmdtype = *(tmpbuf + skip - 2) - '0';
#if 0
		if (cmdtype == 2) {
			NDEBUG("cmdtype=[%d],buf[%s],len[%d]\n", cmdtype,
			       priv_cmd_s.buf, priv_cmd_s.total_len);
			for (idx = 0; idx < priv_cmd_s.total_len; idx++)
				printk("[%02x]", priv_cmd_s.buf[idx]);
		}
#endif
#ifdef RTK_NL80211
		ret1 = rtk_cfg80211_set_wps_p2p_ie(priv, (tmpbuf + skip),
						   (priv_cmd_s.total_len - skip), cmdtype);
#endif
		break;
	}
	default:
		NDEBUG(" !! unknown cmd_num[%d]\n", cmd_num2);
	}
#endif

	return 0;
}
static enum mmio_result ioapic_access_handler(void *arg,
					      struct mmio_access *mmio)
{
	union ioapic_redir_entry *shadow_table;
	struct cell_ioapic *ioapic = arg;
	u32 index, entry;

	switch (mmio->address) {
	case IOAPIC_REG_INDEX:
		if (mmio->is_write)
			ioapic->index_reg_val = mmio->value;
		else
			mmio->value = ioapic->index_reg_val;
		return MMIO_HANDLED;

	case IOAPIC_REG_DATA:
		index = ioapic->index_reg_val;

		if (index == IOAPIC_ID || index == IOAPIC_VER) {
			if (mmio->is_write)
				goto invalid_access;
			mmio->value = ioapic_reg_read(ioapic->phys_ioapic,
						      index);
			return MMIO_HANDLED;
		}

		if (index < IOAPIC_REDIR_TBL_START ||
		    index > IOAPIC_REDIR_TBL_END)
			goto invalid_access;

		entry = (index - IOAPIC_REDIR_TBL_START) / 2;
		if ((ioapic->pin_bitmap & (1UL << entry)) == 0)
			goto invalid_access;

		if (mmio->is_write) {
			if (ioapic_virt_redir_write(ioapic, index,
						    mmio->value) < 0)
				goto invalid_access;
		} else {
			index -= IOAPIC_REDIR_TBL_START;
			shadow_table = ioapic->phys_ioapic->shadow_redir_table;
			mmio->value = shadow_table[index / 2].raw[index % 2];
		}
		return MMIO_HANDLED;

	case IOAPIC_REG_EOI:
		if (!mmio->is_write || ioapic->pin_bitmap == 0)
			goto invalid_access;
		/*
		 * Just write the EOI if the cell has any assigned pin. It
		 * would be complex to virtualize it in a way that cells are
		 * unable to ack vectors of other cells. It is therefore not
		 * recommended to use level-triggered IOAPIC interrupts in
		 * non-root cells.
		 */
		mmio_write32(ioapic->phys_ioapic->reg_base + IOAPIC_REG_EOI,
			     mmio->value);
		return MMIO_HANDLED;
	}

invalid_access:
	panic_printk("FATAL: Invalid IOAPIC %s, reg: %x, index: %x\n",
		     mmio->is_write ? "write" : "read", mmio->address,
		     ioapic->index_reg_val);
	return MMIO_ERROR;
}
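How the data-register index selects a redirection-table pin in the code above, as a short walkthrough (IOAPIC_REDIR_TBL_START is 0x10 on real hardware; each pin occupies two 32-bit registers):

/*
 * Example: index 0x12 written to IOAPIC_REG_INDEX addresses pin 1:
 *
 *   entry = (0x12 - 0x10) / 2 = 1     -> bit 1 of pin_bitmap is checked
 *   shadow_table[2 / 2].raw[2 % 2]    -> low dword of shadow entry 1
 */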
int nat25_handle_frame(_adapter *priv, struct sk_buff *skb)
{
#ifdef BR_EXT_DEBUG
	if ((!priv->ethBrExtInfo.nat25_disable) && (!(skb->data[0] & 1))) {
		panic_printk("NAT25: Input Frame: DA=%02x%02x%02x%02x%02x%02x SA=%02x%02x%02x%02x%02x%02x\n",
			     skb->data[0], skb->data[1], skb->data[2],
			     skb->data[3], skb->data[4], skb->data[5],
			     skb->data[6], skb->data[7], skb->data[8],
			     skb->data[9], skb->data[10], skb->data[11]);
	}
#endif

	if (!(skb->data[0] & 1)) {
		int is_vlan_tag = 0, i, retval = 0;
		unsigned short vlan_hdr = 0;
		unsigned short protocol;

		protocol = be16_to_cpu(*((__be16 *)(skb->data + 2 * ETH_ALEN)));
		if (protocol == ETH_P_8021Q) {
			is_vlan_tag = 1;
			vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2));
			for (i = 0; i < 6; i++)
				*((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) =
					*((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2));
			skb_pull(skb, 4);
		}

		if (!priv->ethBrExtInfo.nat25_disable) {
			_irqL irqL;

			_enter_critical_bh(&priv->br_ext_lock, &irqL);
			/*
			 * This function looks up the destination network address in
			 * the NAT2.5 database. A return value of -1 means that the
			 * corresponding network protocol is NOT supported.
			 */
			if (!priv->ethBrExtInfo.nat25sc_disable &&
			    (be16_to_cpu(*((__be16 *)(skb->data + ETH_ALEN * 2))) == ETH_P_IP) &&
			    !memcmp(priv->scdb_ip, skb->data + ETH_HLEN + 16, 4)) {
				memcpy(skb->data, priv->scdb_mac, ETH_ALEN);
				_exit_critical_bh(&priv->br_ext_lock, &irqL);
			} else {
				_exit_critical_bh(&priv->br_ext_lock, &irqL);
				retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
			}
		} else {
			if (((be16_to_cpu(*((__be16 *)(skb->data + ETH_ALEN * 2))) == ETH_P_IP) &&
			     !memcmp(priv->br_ip, skb->data + ETH_HLEN + 16, 4)) ||
			    ((be16_to_cpu(*((__be16 *)(skb->data + ETH_ALEN * 2))) == ETH_P_ARP) &&
			     !memcmp(priv->br_ip, skb->data + ETH_HLEN + 24, 4))) {
				/* for traffic to upper TCP/IP */
				retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
			}
		}

		if (is_vlan_tag) {
			skb_push(skb, 4);
			for (i = 0; i < 6; i++)
				*((unsigned short *)(skb->data + i * 2)) =
					*((unsigned short *)(skb->data + 4 + i * 2));
			*((__be16 *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q);
			*((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr;
		}

		if (retval == -1) {
			/* DEBUG_ERR("NAT25: Lookup fail!\n"); */
			return -1;
		}
	}

	return 0;
}
struct mmio_access mmio_parse(struct per_cpu *cpu_data, unsigned long pc,
			      const struct guest_paging_structures *pg_structs,
			      bool is_write)
{
	struct mmio_access access = { .inst_len = 0 };
	bool has_regr, has_modrm, does_write;
	struct modrm modrm;
	struct sib sib;
	u8 *page = NULL;

	access.inst_len = 0;
	has_regr = false;

restart:
	page = map_code_page(cpu_data, pg_structs, pc, page);
	if (!page)
		goto error_nopage;

	has_modrm = false;
	switch (page[pc & PAGE_OFFS_MASK]) {
	case X86_OP_REGR_PREFIX:
		if (has_regr)
			goto error_unsupported;
		has_regr = true;
		pc++;
		access.inst_len++;
		goto restart;
	case X86_OP_MOV_TO_MEM:
		access.inst_len += 2;
		access.size = 4;
		has_modrm = true;
		does_write = true;
		break;
	case X86_OP_MOV_FROM_MEM:
		access.inst_len += 2;
		access.size = 4;
		has_modrm = true;
		does_write = false;
		break;
	default:
		goto error_unsupported;
	}

	if (has_modrm) {
		pc++;
		page = map_code_page(cpu_data, pg_structs, pc, page);
		if (!page)
			goto error_nopage;

		modrm = *(struct modrm *)&page[pc & PAGE_OFFS_MASK];

		switch (modrm.mod) {
		case 0:
			if (modrm.rm != 4)
				goto error_unsupported;

			pc++;
			page = map_code_page(cpu_data, pg_structs, pc, page);
			if (!page)
				goto error_nopage;

			sib = *(struct sib *)&page[pc & PAGE_OFFS_MASK];
			if (sib.ss != 0 || sib.index != 4 || sib.reg != 5)
				goto error_unsupported;
			access.inst_len += 5;
			break;
		case 2:
			access.inst_len += 4;
			break;
		default:
			goto error_unsupported;
		}

		if (has_regr)
			access.reg = 7 - modrm.reg;
		else if (modrm.reg == 4)
			goto error_unsupported;
		else
			access.reg = 15 - modrm.reg;
	}

	if (does_write != is_write)
		goto error_inconsistent;

	return access;

error_nopage:
	panic_printk("FATAL: unable to map MMIO instruction page\n");
	goto error;

error_unsupported:
	panic_printk("FATAL: unsupported instruction\n");
	goto error;

error_inconsistent:
	panic_printk("FATAL: inconsistent access, expected %s instruction\n",
		     is_write ? "write" : "read");
error:
	access.inst_len = 0;
	return access;
}
void nat25_db_expire(_adapter *priv)
{
	int i;
	_irqL irqL;

	_enter_critical_bh(&priv->br_ext_lock, &irqL);

	/* if (!priv->ethBrExtInfo.nat25_disable) */
	{
		for (i = 0; i < NAT25_HASH_SIZE; i++) {
			struct nat25_network_db_entry *f;

			f = priv->nethash[i];
			while (f != NULL) {
				struct nat25_network_db_entry *g;

				g = f->next_hash;
				if (__nat25_has_expired(priv, f)) {
					if (atomic_dec_and_test(&f->use_count)) {
#ifdef BR_EXT_DEBUG
						panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
							     "%02x%02x%02x%02x%02x%02x\n",
							     i,
							     f->macAddr[0], f->macAddr[1], f->macAddr[2],
							     f->macAddr[3], f->macAddr[4], f->macAddr[5],
							     f->networkAddr[0], f->networkAddr[1],
							     f->networkAddr[2], f->networkAddr[3],
							     f->networkAddr[4], f->networkAddr[5],
							     f->networkAddr[6], f->networkAddr[7],
							     f->networkAddr[8], f->networkAddr[9],
							     f->networkAddr[10], f->networkAddr[11],
							     f->networkAddr[12], f->networkAddr[13],
							     f->networkAddr[14], f->networkAddr[15],
							     f->networkAddr[16]);
#endif
						if (priv->scdb_entry == f) {
							memset(priv->scdb_mac, 0, ETH_ALEN);
							memset(priv->scdb_ip, 0, 4);
							priv->scdb_entry = NULL;
						}
						__network_hash_unlink(f);
						rtw_mfree((u8 *)f, sizeof(struct nat25_network_db_entry));
					}
				}
				f = g;
			}
		}
	}

	_exit_critical_bh(&priv->br_ext_lock, &irqL);
}
void rtl8192cd_dfs_det_chk(struct rtl8192cd_priv *priv)
{
	unsigned int regf98_value;
	unsigned int reg918_value;
	unsigned int reg91c_value;
	unsigned int reg920_value;
	unsigned int reg924_value;
	unsigned int FA_count_cur = 0, FA_count_inc = 0;
	unsigned int VHT_CRC_ok_cnt_cur = 0, VHT_CRC_ok_cnt_inc = 0;
	unsigned int HT_CRC_ok_cnt_cur = 0, HT_CRC_ok_cnt_inc = 0;
	unsigned int LEG_CRC_ok_cnt_cur = 0, LEG_CRC_ok_cnt_inc = 0;
	unsigned int Total_CRC_OK_cnt_inc = 0, FA_CRCOK_ratio = 0;
	unsigned char DFS_tri_short_pulse = 0, DFS_tri_long_pulse = 0, fa_mask_mid_th = 0, fa_mask_lower_th = 0;
	unsigned char radar_type = 0;	/* 0 for short, 1 for long */
	unsigned int short_pulse_cnt_cur = 0, short_pulse_cnt_inc = 0;
	unsigned int long_pulse_cnt_cur = 0, long_pulse_cnt_inc = 0;
	unsigned int total_pulse_count_inc = 0, max_sht_pusle_cnt_th = 0;
	unsigned int sum, k, fa_flag = 0;
	unsigned int st_L2H_new = 0, st_L2H_tmp, index = 0, fault_flag_det, fault_flag_psd;
	int flags = 0;
	unsigned long throughput = 0;
	int j;
	int i, PSD_report_right[20], PSD_report_left[20];
	int max_right, max_left;
	int max_fa_in_hist = 0, total_fa_in_hist = 0, pre_post_now_acc_fa_in_hist = 0;

	if (priv->det_asoc_clear > 0) {
		priv->det_asoc_clear--;
		priv->pmib->dot11DFSEntry.DFS_detected = 0;
		priv->FA_count_pre = 0;
		priv->VHT_CRC_ok_cnt_pre = 0;
		priv->HT_CRC_ok_cnt_pre = 0;
		priv->LEG_CRC_ok_cnt_pre = 0;
		priv->mask_idx = 0;
		priv->mask_hist_checked = 0;
		memset(priv->radar_det_mask_hist, 0, sizeof(priv->radar_det_mask_hist));
		memset(priv->pulse_flag_hist, 0, sizeof(priv->pulse_flag_hist));
		mod_timer(&priv->dfs_det_chk_timer,
			  jiffies + RTL_MILISECONDS_TO_JIFFIES(priv->pshare->rf_ft_var.dfs_det_period * 10));
		return;
	}

	throughput = priv->ext_stats.tx_avarage + priv->ext_stats.rx_avarage;
#ifdef MBSSID
	if (priv->pmib->miscEntry.vap_enable) {
		for (j = 0; j < RTL8192CD_NUM_VWLAN; j++) {
			if (IS_DRV_OPEN(priv->pvap_priv[j])) {
				throughput += priv->pvap_priv[j]->ext_stats.tx_avarage +
					      priv->pvap_priv[j]->ext_stats.rx_avarage;
			}
		}
	}
#endif

	// Get FA count during past 100ms
	FA_count_cur = PHY_QueryBBReg(priv, 0xf48, 0x0000ffff);

	if (priv->FA_count_pre == 0)
		FA_count_inc = 0;
	else if (FA_count_cur >= priv->FA_count_pre)
		FA_count_inc = FA_count_cur - priv->FA_count_pre;
	else
		FA_count_inc = FA_count_cur;
	priv->FA_count_pre = FA_count_cur;

	priv->fa_inc_hist[priv->mask_idx] = FA_count_inc;

	for (i = 0; i < 5; i++) {
		total_fa_in_hist = total_fa_in_hist + priv->fa_inc_hist[i];
		if (priv->fa_inc_hist[i] > max_fa_in_hist)
			max_fa_in_hist = priv->fa_inc_hist[i];
	}

	if (priv->mask_idx >= priv->pshare->rf_ft_var.dfs_det_flag_offset)
		index = priv->mask_idx - priv->pshare->rf_ft_var.dfs_det_flag_offset;
	else
		index = priv->pshare->rf_ft_var.dfs_det_hist_len + priv->mask_idx -
			priv->pshare->rf_ft_var.dfs_det_flag_offset;

	if (index == 0)
		pre_post_now_acc_fa_in_hist = priv->fa_inc_hist[index] +
			priv->fa_inc_hist[index + 1] + priv->fa_inc_hist[4];
	else if (index == 4)
		pre_post_now_acc_fa_in_hist = priv->fa_inc_hist[index] +
			priv->fa_inc_hist[0] + priv->fa_inc_hist[index - 1];
	else
		pre_post_now_acc_fa_in_hist = priv->fa_inc_hist[index] +
			priv->fa_inc_hist[index + 1] + priv->fa_inc_hist[index - 1];

	// Get VHT CRC32 ok count during past 100ms
	VHT_CRC_ok_cnt_cur = PHY_QueryBBReg(priv, 0xf0c, 0x00003fff);
	if (VHT_CRC_ok_cnt_cur >= priv->VHT_CRC_ok_cnt_pre)
		VHT_CRC_ok_cnt_inc = VHT_CRC_ok_cnt_cur - priv->VHT_CRC_ok_cnt_pre;
	else
		VHT_CRC_ok_cnt_inc = VHT_CRC_ok_cnt_cur;
	priv->VHT_CRC_ok_cnt_pre = VHT_CRC_ok_cnt_cur;

	// Get HT CRC32 ok count during past 100ms
	HT_CRC_ok_cnt_cur = PHY_QueryBBReg(priv, 0xf10, 0x00003fff);
	if (HT_CRC_ok_cnt_cur >= priv->HT_CRC_ok_cnt_pre)
		HT_CRC_ok_cnt_inc = HT_CRC_ok_cnt_cur - priv->HT_CRC_ok_cnt_pre;
	else
		HT_CRC_ok_cnt_inc = HT_CRC_ok_cnt_cur;
	priv->HT_CRC_ok_cnt_pre = HT_CRC_ok_cnt_cur;

	// Get Legacy CRC32 ok count during past 100ms
	LEG_CRC_ok_cnt_cur = PHY_QueryBBReg(priv, 0xf14, 0x00003fff);
	if (LEG_CRC_ok_cnt_cur >= priv->LEG_CRC_ok_cnt_pre)
		LEG_CRC_ok_cnt_inc = LEG_CRC_ok_cnt_cur - priv->LEG_CRC_ok_cnt_pre;
	else
		LEG_CRC_ok_cnt_inc = LEG_CRC_ok_cnt_cur;
	priv->LEG_CRC_ok_cnt_pre = LEG_CRC_ok_cnt_cur;

	if ((VHT_CRC_ok_cnt_cur == 0x3fff) ||
	    (HT_CRC_ok_cnt_cur == 0x3fff) ||
	    (LEG_CRC_ok_cnt_cur == 0x3fff)) {
		PHY_SetBBReg(priv, 0xb58, BIT(0), 1);
		PHY_SetBBReg(priv, 0xb58, BIT(0), 0);
	}

	Total_CRC_OK_cnt_inc = VHT_CRC_ok_cnt_inc + HT_CRC_ok_cnt_inc + LEG_CRC_ok_cnt_inc;

	// check if the FA occurs frequently during 100ms
	// FA_count_inc is divided by Total_CRC_OK_cnt_inc, which helps to distinguish normal transmission from interference
	if (Total_CRC_OK_cnt_inc > 0)
		FA_CRCOK_ratio = FA_count_inc / Total_CRC_OK_cnt_inc;

	//===== dynamic power threshold (DPT) =====

	// Get short pulse count, need to carefully handle the counter overflow
	regf98_value = PHY_QueryBBReg(priv, 0xf98, 0xffffffff);
	short_pulse_cnt_cur = regf98_value & 0x000000ff;
	if (short_pulse_cnt_cur >= priv->short_pulse_cnt_pre)
		short_pulse_cnt_inc = short_pulse_cnt_cur - priv->short_pulse_cnt_pre;
	else
		short_pulse_cnt_inc = short_pulse_cnt_cur;
	priv->short_pulse_cnt_pre = short_pulse_cnt_cur;

	// Get long pulse count, need to carefully handle the counter overflow
	long_pulse_cnt_cur = (regf98_value & 0x0000ff00) >> 8;
	if (long_pulse_cnt_cur >= priv->long_pulse_cnt_pre)
		long_pulse_cnt_inc = long_pulse_cnt_cur - priv->long_pulse_cnt_pre;
	else
		long_pulse_cnt_inc = long_pulse_cnt_cur;
	priv->long_pulse_cnt_pre = long_pulse_cnt_cur;

	total_pulse_count_inc = short_pulse_cnt_inc + long_pulse_cnt_inc;

	if (priv->pshare->rf_ft_var.dfs_det_print) {
		panic_printk("=====================================================================\n");
		panic_printk("Total_CRC_OK_cnt_inc[%d] VHT_CRC_ok_cnt_inc[%d] HT_CRC_ok_cnt_inc[%d] LEG_CRC_ok_cnt_inc[%d] FA_count_inc[%d] FA_CRCOK_ratio[%d]\n",
			     Total_CRC_OK_cnt_inc, VHT_CRC_ok_cnt_inc, HT_CRC_ok_cnt_inc,
			     LEG_CRC_ok_cnt_inc, FA_count_inc, FA_CRCOK_ratio);
		panic_printk("Init_Gain[%x] 0x91c[%x] 0xf98[%08x] short_pulse_cnt_inc[%d] long_pulse_cnt_inc[%d]\n",
			     priv->ini_gain_cur, priv->st_L2H_cur, regf98_value,
			     short_pulse_cnt_inc, long_pulse_cnt_inc);
		panic_printk("Throughput: %luMbps\n", (throughput >> 17));
		reg918_value = PHY_QueryBBReg(priv, 0x918, 0xffffffff);
		reg91c_value = PHY_QueryBBReg(priv, 0x91c, 0xffffffff);
		reg920_value = PHY_QueryBBReg(priv, 0x920, 0xffffffff);
		reg924_value = PHY_QueryBBReg(priv, 0x924, 0xffffffff);
		printk("0x918[%08x] 0x91c[%08x] 0x920[%08x] 0x924[%08x]\n",
		       reg918_value, reg91c_value, reg920_value, reg924_value);
	}
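All counter reads above share the same wrap-handling pattern; a hypothetical helper (not in the driver) makes it explicit: when the hardware counter has wrapped (cur < pre), the code conservatively takes the current reading as the increment instead of reconstructing the wrapped distance.

static inline unsigned int counter_inc(unsigned int cur, unsigned int pre)
{
	/* e.g. FA_count_inc = counter_inc(FA_count_cur, priv->FA_count_pre) */
	return (cur >= pre) ? cur - pre : cur;
}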