/*
 * TX-completion handler for one queue of the modeled CPMAC device:
 * acks the hardware, updates stats, unmaps and frees the completed skb,
 * then wakes the subqueue.
 *
 * NOTE(review): the synthlock_0 unlock/relock pattern interleaved with
 * cplock is synthesized instrumentation — the exact statement order is
 * the point of this model; do not reorder.
 */
static void cpmac_end_xmit(int queue)
{
	// struct cpmac_desc *desc;
	//struct cpmac_priv *priv = netdev_priv(dev);

	// desc = desc_ring[queue];
	/* Acknowledge the transmitted descriptor to the (modeled) hardware. */
	cpmac_write(CPMAC_TX_ACK(queue), (u32)desc_ring[queue].mapping);
	if (likely(desc_ring[queue].skb)) {
		unlock_s(synthlock_0);
		/* Stats are protected by cplock in this model. */
		spin_lock(cplock);
		netdev.stats.tx_packets++;
		lock_s(synthlock_0);
		netdev.stats.tx_bytes += desc_ring[queue].skb->len;
		spin_unlock(cplock);
		dma_unmap_single(desc_ring[queue].data_mapping, desc_ring[queue].skb->len, DMA_TO_DEVICE);
		// if (unlikely(netif_msg_tx_done(priv)))
		//	netdev_dbg(dev, "sent 0x%p, len=%d\n",
		//		   desc_ring[queue].skb, desc_ring[queue].skb->len);
		dev_kfree_skb_irq(desc_ring[queue].skb);
		/* Clear the slot so a spurious completion is detectable below. */
		desc_ring[queue].skb = NULL;
		//if (__netif_subqueue_stopped(dev, queue))
		unlock_s(synthlock_0);
		netif_wake_subqueue();
	} else {
		/* No skb pending: spurious interrupt; still wake the queue. */
		// if (netif_msg_tx_err(priv) && net_ratelimit())
		//	netdev_warn(dev, "end_xmit: spurious interrupt\n");
		//if (__netif_subqueue_stopped(dev, queue))
		unlock_s(synthlock_0);
		netif_wake_subqueue();
	}
	/* Re-acquire the synthesized lock dropped in both branches above. */
	lock_s(synthlock_0);
}
/*
 * User-context model thread: snapshots the shared pointer ipath_pd under
 * lock l, performs (modeled) blocking work on the copy, then clears
 * ipath_pd under synthesized lock l1 — racing thread_irq, which reads
 * ipath_pd under the same l1.
 */
void thread_user()
{
	void *tmp;
	// a.
	lock(l);
	// b. make a copy of ipath_pd
	tmp = ipath_pd;
	unlock(l);
	// c. perform some blocking operations on it
	if (tmp) {
		// d.
		block = 1;
		// e.
		block = 0;
	};
	// f. deallocate — NULL-out models freeing the object.
	lock_s(l1);
	ipath_pd = (void *)0;
	unlock_s(l1);
	// g.
}
/*
 * Model of free_irq(): disables the interrupt condition, then drops the
 * synthesized lock so it can synchronize with a running handler by
 * acquiring/releasing irq_running_lock (an empty critical section acts
 * as a barrier waiting for the handler to finish).
 */
void free_irq()
{
	reset(cond_irq_enabled);
	unlock_s(synthlock_0);
	lock(irq_running_lock);
	unlock(irq_running_lock);
	lock_s(synthlock_0);
}
// OS model void thread_3() { lock_s(l1); assume_not(napi_poll); rtl8169_poll(); unlock_s(l1); }
// driver entry point void thread_1() { lock_s(l1); /*(1)*/ stuff1(); /*(2)*/ notify(napi_poll); // disable NAPI loop unlock_s(l1); }
/*
 * Driver init model: registers the netdev and then publishes the
 * hw_start function pointer, both inside one synthesized critical
 * section so rtl8169_open() never dereferences hw_start before it is set.
 */
void thread_1()
{
	// 1
	lock_s(synthlock_0);
	register_netdev();
	// 2
	hw_start = &start_device;
	unlock_s(synthlock_0);
}
/*
 * Models i915 vblank-IRQ disable: clears irq_enable and then rewrites
 * irq_status with the enable and status bits masked off. The two
 * synthesized locks bracket the enable-flag update and the full
 * read-modify-write of irq_status so the commented invariant below
 * (enable flag consistent with EN_MASK) holds against the enable path.
 */
void thread_i915_disable_vblank()
{
	lock_s(synthlock_1);
	unsigned int tmp;
	// a.
	lock_s(synthlock_0);
	irq_enable = 0;
	// b.
	tmp = irq_status;
	// c.
	unlock_s(synthlock_0);
	irq_status = tmp & (~EN_MASK) & (~STAT_MASK);
	unlock_s(synthlock_1);
	// d.
	// assert (((irq_enable == 1) && (irq_status & EN_MASK)) ||
	//         ((irq_enable == 0) && !(irq_status & EN_MASK)));
}
/*
 * Models i915 vblank-IRQ enable: read-modify-write of irq_status to set
 * EN_MASK (clearing STAT_MASK), then set irq_enable. Mirror image of
 * thread_i915_disable_vblank; the synthesized lock placement keeps the
 * commented invariant below true across interleavings.
 */
void thread_i915_enable_vblank()
{
	unsigned int tmp;
	// 1.
	lock_s(synthlock_1);
	tmp = irq_status;
	lock_s(synthlock_0);
	// 2.
	irq_status = (tmp | EN_MASK) & (~STAT_MASK);
	// 3.
	unlock_s(synthlock_1);
	irq_enable = 1;
	unlock_s(synthlock_0);
	// 4.
	// assert (((irq_enable == 1) && (irq_status & EN_MASK)) ||
	//         ((irq_enable == 0) && !(irq_status & EN_MASK)));
}
static int cpmac_start_xmit(struct sk_buff *skb) { int queue, len, ret; lock_s(l1); //struct cpmac_desc *desc; //struct cpmac_priv *priv = netdev_priv(dev); //if (unlikely(atomic_read(reset_pending))) // return NETDEV_TX_BUSY; //cpmac_write(CPMAC_TX_PTR(queue), (u32)desc_ring[queue].mapping); // BUG: move this line to the *** location below notify(cond_irq_can_happen); if (unlikely(skb_padto(skb, ETH_ZLEN))) { ret = NETDEV_TX_OK; } else { len = max(skb->len, ETH_ZLEN); //queue = skb_get_queue_mapping(skb); netif_stop_subqueue(/*queue*/); //desc = &desc_ring[queue]; if (unlikely(desc_ring[queue].dataflags & CPMAC_OWN)) { // if (netif_msg_tx_err(priv) && net_ratelimit()) // netdev_warn(dev, "tx dma ring full\n"); ret = NETDEV_TX_BUSY; } else { spin_lock(cplock); spin_unlock(cplock); desc_ring[queue].dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; desc_ring[queue].skb = skb; desc_ring[queue].data_mapping = dma_map_single(skb->data, len, DMA_TO_DEVICE); desc_ring[queue].hw_data = (u32)desc_ring[queue].data_mapping; desc_ring[queue].datalen = len; desc_ring[queue].buflen = len; // if (unlikely(netif_msg_tx_queued(priv))) // netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); // if (unlikely(netif_msg_hw(priv))) // cpmac_dump_desc(dev, &desc_ring[queue]); // if (unlikely(netif_msg_pktdata(priv))) // cpmac_dump_skb(dev, skb); ret = NETDEV_TX_OK; } } // *** unlock_s(l1); return ret; }
/*
 * Atomically add `value` to *pointer under the synthesized lock.
 * The optional pthread_yield() between the read and the write widens
 * the race window when opt_yield is set, for testing the lock.
 */
void add_s(long long *pointer, long long value)
{
	lock_s();
	long long updated = *pointer + value;
	if (opt_yield) {
		pthread_yield();
	}
	*pointer = updated;
	unlock_s();
}
/* A client has stopped using the device.
 * Power down the device if this is the last client.
 *
 * NOTE(review): clears `open`, then nondeterministically (modeling "was
 * this the last client?") re-checks it and powers the device off. The
 * nested synthlock_1/synthlock_0 order must match i2c_hid_open's
 * acquisition order; do not reorder.
 */
void i2c_hid_close()
{
	int x;
	lock(l);
	lock_s(synthlock_1);
	lock_s(synthlock_0);
	reset(open);
	unlock_s(synthlock_0);
	if (nondet) {
		/* Last-client path: only proceed once no one has re-opened. */
		assume_not(open);
		power_on = 0;
	}
	/* Read back power state for the (commented) invariant check. */
	x = power_on;
	unlock_s(synthlock_1);
	//assert (power_on == 0);
	unlock(l);
}
/*
 * Serial-bus model thread: probes the device while the port is
 * registered, then waits (assume_not under synthlock_1) for
 * deregistration before running the remove path — both bus operations
 * serialized by the bus lock.
 */
void thread_serial_bus()
{
	lock_serial_bus();
	assume(port_dev_registered);
	usb_serial_device_probe();
	unlock_serial_bus();
	/* Block until usb_serial_put() has cleared port_dev_registered. */
	lock_s(synthlock_1);
	assume_not(port_dev_registered);
	unlock_s(synthlock_1);
	lock_serial_bus();
	usb_serial_device_remove();
	unlock_serial_bus();
}
/* A client wants to start using the device.
 * Powers up the device if it is currently closed.
 *
 * NOTE(review): the two branches acquire synthlock_0 at different
 * points (after vs. before the state check) but both converge holding
 * synthlock_1 + synthlock_0 at notify(open). The asymmetric acquisition
 * is synthesized and load-bearing; do not normalize it.
 */
void i2c_hid_open()
{
	int x;
	// lock(l);
	if (nondet) {
		/* First-open path: device was closed, power it up. */
		lock_s(synthlock_1);
		assume_not(open);
		power_on = 1;
		lock_s(synthlock_0);
	} else {
		/* Already-open path: just observe the open state. */
		lock_s(synthlock_1);
		lock_s(synthlock_0);
		assume(open);
	}
	notify(open);
	unlock_s(synthlock_0);
	/* Read back power state for the (commented) invariant check. */
	x = power_on;
	unlock_s(synthlock_1);
	//assert (power_on != 0);
	// unlock(l);
}
//static int cpmac_open(struct net_device *dev); // //static void cpmac_dump_regs(struct net_device *dev) //{ // int i; // struct cpmac_priv *priv = netdev_priv(dev); // // for (i = 0; i < CPMAC_REG_END; i += 4) { // if (i % 16 == 0) { // if (i) // printk("\n"); // printk("%s: reg[%p]:", dev->name, priv->regs + i); // } // printk(" %08x", cpmac_read(priv->regs, i)); // } // printk("\n"); //} // //static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) //{ // int i; // // printk("%s: desc[%p]:", dev->name, desc); // for (i = 0; i < sizeof(*desc) / 4; i++) // printk(" %08x", ((u32 *)desc)[i]); // printk("\n"); //} // //static void cpmac_dump_all_desc(struct net_device *dev) //{ // struct cpmac_priv *priv = netdev_priv(dev); // struct cpmac_desc *dump = priv->rx_head; // // do { // cpmac_dump_desc(dev, dump); // dump = dump->next; // } while (dump != priv->rx_head); //} // //static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) //{ // int i; // // printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); // for (i = 0; i < skb->len; i++) { // if (i % 16 == 0) { // if (i) // printk("\n"); // printk("%s: data[%p]:", dev->name, skb->data + i); // } // printk(" %02x", ((u8 *)skb->data)[i]); // } // printk("\n"); //} // //static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) //{ // u32 val; // // while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) // cpu_relax(); // cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | // MDIO_PHY(phy_id)); // while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) // cpu_relax(); // // return MDIO_DATA(val); //} // //static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, // int reg, u16 val) //{ // while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) // cpu_relax(); // cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | // MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); // // return 0; //} // static 
/*
 * Reset the MDIO bus of the modeled CPMAC device and re-enable its
 * controller. The clock lookup from the real driver is stubbed out;
 * `nondet` stands in for clk_get_rate() in the divider computation.
 * Always returns 0 (the error path is commented out with the clock code).
 */
int cpmac_mdio_reset()
{
	// struct clk *cpmac_clk;

	// cpmac_clk = clk_get("cpmac");
	// if (IS_ERR(cpmac_clk)) {
	//	pr_err("unable to get cpmac clock\n");
	//	return -1;
	// }
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	lock_s(synthlock_0);
	cpmac_write(CPMAC_MDIO_CONTROL, MDIOC_ENABLE | MDIOC_CLKDIV(/*clk_get_rate(cpmac_clk)*/nondet / 2200000 - 1));
	unlock_s(synthlock_0);
	return 0;
}
/*
 * Model of the usb-serial device remove path: observes the init flags,
 * takes an autopm reference across the port teardown, and clears the
 * tty-registered flag — all of the flag work under synthlock_1 so it
 * cannot race usb_serial_put()'s deregistration.
 */
void usb_serial_device_remove()
{
	lock_s(synthlock_1);
	int x;
	x = port_initialized;
	x = dev_usb_serial_initialized;
	//assert (dev_usb_serial_initialized>=0);
	/* make sure suspend/resume doesn't race against port_remove */
	dev_autopm++;
	reset(port_tty_registered);
	//belkin_port_remove();
	unlock_s(synthlock_1);
	/* Drop the autopm reference taken above. */
	dev_autopm--;
}
/*
 * IRQ-context model thread: under synthesized lock l1, checks that
 * ipath_pd is still non-NULL before using it — l1 is what prevents
 * thread_user's deallocation (ipath_pd = NULL) from interleaving
 * between the check and the use.
 */
void thread_irq()
{
	void *x;
	// 1.
	lock(l);
	lock_s(l1);
	// 2. check that ipath_pd is not NULL
	if (ipath_pd) {
		// 3. use ipath_pd
		//assert (ipath_pd);
		x = ipath_pd;
	}
	unlock_s(l1);
	// 4.
	unlock(l);
}
/*
 * Bring up the modeled CPMAC hardware: reset the device, zero the
 * TX/RX descriptor pointers, program MAC address/filters, clear and
 * enable interrupts, and turn on RX/TX/MAC control. The per-ring loops
 * of the real driver are collapsed to a single representative index.
 * Almost the whole register sequence runs under synthlock_0 (taken
 * after the first TX pointer write — synthesized placement, keep as-is).
 */
static void cpmac_hw_start()
{
	int i;
	//struct cpmac_priv *priv = netdev_priv(dev);
	//struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata.reset_bit);
	//for (i = 0; i < 8; i++) {
	cpmac_write(CPMAC_TX_PTR(i), 0);
	lock_s(synthlock_0);
	cpmac_write_CPMAC_RX_PTR(i, 0);
	//}
	/* Point the hardware at the head of the RX descriptor ring. */
	cpmac_write_CPMAC_RX_PTR(0, rx_head->mapping);
	cpmac_write(CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST);
	cpmac_write(CPMAC_BUFFER_OFFSET, 0);
	//for (i = 0; i < 8; i++)
	cpmac_write(CPMAC_MAC_ADDR_LO(i), netdev.dev_addr[5]);
	cpmac_write(CPMAC_MAC_ADDR_MID, netdev.dev_addr[4]);
	cpmac_write(CPMAC_MAC_ADDR_HI, netdev.dev_addr[0] | (netdev.dev_addr[1] << 8) | (netdev.dev_addr[2] << 16) | (netdev.dev_addr[3] << 24));
	cpmac_write(CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	/* Clear any stale interrupt state before enabling interrupts. */
	cpmac_write(CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(CPMAC_MAC_INT_ENABLE, 3);
	/* Finally enable RX, TX and the MAC itself (read-modify-write). */
	cpmac_write(CPMAC_RX_CONTROL, cpmac_read(CPMAC_RX_CONTROL) | 1);
	cpmac_write(CPMAC_TX_CONTROL, cpmac_read(CPMAC_TX_CONTROL) | 1);
	cpmac_write(CPMAC_MAC_CONTROL, cpmac_read(CPMAC_MAC_CONTROL) | MAC_MII | MAC_FDX);
	unlock_s(synthlock_0);
}
/*
 * Drop one reference on the modeled usb-serial core. When the last
 * reference goes away (old == 1): release the minor-number idr entry
 * (under the table lock, with fw_idr_consistent toggled around the
 * mutation), deregister the port from the bus, wait for the tty side
 * to be gone, and tear down the remaining module state.
 *
 * NOTE(review): the decrement of dev_usb_serial_initialized is not
 * atomic here (the atomicBegin/End markers are commented out) —
 * presumably the checker tolerates or is meant to find that; confirm
 * against the harness configuration.
 */
void usb_serial_put()
{
	int old;
	int x;
	//assert (dev_usb_serial_initialized > 0);
	//atomicBegin();
	old = dev_usb_serial_initialized;
	dev_usb_serial_initialized--;
	//atomicEnd();
	if (old == 1) {
		//release_minors(serial);
		if (nondet) {
			assume(port_idr_registered);
			lock_table();
			/* Mark the idr inconsistent while it is being mutated. */
			x = fw_idr_consistent;
			fw_idr_consistent = 0;
			reset(port_idr_registered);
			fw_idr_consistent = 1;
			unlock_table();
		}
		//belkin_release ();
		/* Now that nothing is using the ports, they can be freed */
		lock_serial_bus();
		lock_s(synthlock_1);
		reset(port_dev_registered);
		unlock_serial_bus();
		/* Wait for the tty registration to be dropped before finishing. */
		assume_not(port_tty_registered);
		unlock_s(synthlock_1);
		dev_usb_serial_initialized = -1;
		port_initialized = 0;
		reset(drv_module_ref_cnt);
		//drv_module_ref_cnt--;
	}
}
/*
 * Shut down the modeled CPMAC hardware: reset the device, disable RX/TX,
 * zero the descriptor pointers, and clear all interrupt sources — all
 * under synthlock_0. The final MAC_CONTROL write (dropping MAC_MII) is
 * deliberately outside the lock (synthesized placement; keep as-is).
 */
static void cpmac_hw_stop(/*struct net_device *dev*/)
{
	lock_s(synthlock_0);
	int i;
	//struct cpmac_priv *priv = netdev_priv(dev);
	//struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata.reset_bit);
	cpmac_write(CPMAC_RX_CONTROL, cpmac_read(CPMAC_RX_CONTROL) & ~1);
	cpmac_write(CPMAC_TX_CONTROL, cpmac_read(CPMAC_TX_CONTROL) & ~1);
	//for (i = 0; i < 8; i++) {
	cpmac_write(CPMAC_TX_PTR(i), 0);
	cpmac_write_CPMAC_RX_PTR(i, 0);
	//}
	cpmac_write(CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(CPMAC_MAC_INT_CLEAR, 0xff);
	unlock_s(synthlock_0);
	cpmac_write(CPMAC_MAC_CONTROL, cpmac_read(CPMAC_MAC_CONTROL) & ~MAC_MII);
}
/*
 * Acquire the firmware serial-bus lock. The synthesized lock is
 * released across the blocking lock() and re-taken afterwards to keep
 * the synthlock_0/fw_serial_bus_lock acquisition order deadlock-free.
 */
void lock_serial_bus()
{
	unlock_s(synthlock_0);
	lock(fw_serial_bus_lock);
	lock_s(synthlock_0);
}
/*
 * Model of the device-open entry point: calls through the hw_start
 * function pointer under synthlock_0, which thread_1 holds while
 * publishing hw_start — preventing a call through an unset pointer.
 */
void rtl8169_open()
{
	lock_s(synthlock_0);
	(*hw_start)();
	unlock_s(synthlock_0);
}