/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 *
 * One representative cpu is chosen per uvhub set in @distribution; the
 * resulting cpumask is then IPI'd with do_reset() carrying @sender.
 */
static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
{
	int uvhub;
	int maskbits;
	cpumask_t mask;			/* on-stack: one bit per cpu to IPI */
	struct reset_args reset_args;

	pax_track_stack();

	reset_args.sender = sender;
	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
	for (uvhub = 0; uvhub < maskbits; uvhub++) {
		int cpu;

		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		/*
		 * NOTE(review): the return value is not checked for a
		 * negative/invalid cpu here — presumably every uvhub set in
		 * @distribution has at least one online cpu; confirm
		 * uvhub_to_first_cpu() cannot fail in this context.
		 */
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
	return;
}
/*
 * debugfs read: dump the TX slot bitmap and the number of used slots.
 * Output is built into a bounded on-stack buffer under tx_lock and
 * copied to userspace with simple_read_from_buffer().
 */
static ssize_t read_file_slot(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	char buf[512];
	unsigned int pos = 0;

	pax_track_stack();

	spin_lock_bh(&priv->tx.tx_lock);

	pos += snprintf(buf + pos, sizeof(buf) - pos, "TX slot bitmap : ");
	pos += bitmap_scnprintf(buf + pos, sizeof(buf) - pos,
				priv->tx.tx_slot, MAX_TX_BUF_NUM);
	pos += snprintf(buf + pos, sizeof(buf) - pos, "\n");
	pos += snprintf(buf + pos, sizeof(buf) - pos,
			"Used slots : %d\n",
			bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));

	spin_unlock_bh(&priv->tx.tx_lock);

	/* snprintf reports would-be length; clamp to the real buffer size */
	if (pos > sizeof(buf))
		pos = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
/*
 * debugfs read: fetch target interrupt statistics over WMI and format
 * them one "name : value" row per counter. Returns -EINVAL if the WMI
 * command fails.
 */
static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	struct ath9k_htc_target_int_stats cmd_rsp;
	char buf[512];
	unsigned int len = 0;
	int ret = 0;
	const char *labels[6];
	u32 vals[6];
	int i;

	pax_track_stack();

	memset(&cmd_rsp, 0, sizeof(cmd_rsp));

	ath9k_htc_ps_wakeup(priv);

	/* WMI_CMD expands against the local priv/cmd_rsp/ret names */
	WMI_CMD(WMI_INT_STATS_CMDID);
	if (ret) {
		ath9k_htc_ps_restore(priv);
		return -EINVAL;
	}

	ath9k_htc_ps_restore(priv);

	labels[0] = "RX";    vals[0] = be32_to_cpu(cmd_rsp.rx);
	labels[1] = "RXORN"; vals[1] = be32_to_cpu(cmd_rsp.rxorn);
	labels[2] = "RXEOL"; vals[2] = be32_to_cpu(cmd_rsp.rxeol);
	labels[3] = "TXURN"; vals[3] = be32_to_cpu(cmd_rsp.txurn);
	labels[4] = "TXTO";  vals[4] = be32_to_cpu(cmd_rsp.txto);
	labels[5] = "CST";   vals[5] = be32_to_cpu(cmd_rsp.cst);

	for (i = 0; i < 6; i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				"%20s : %10u\n", labels[i], vals[i]);

	/* snprintf reports would-be length; clamp to the real buffer size */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * early_printk - format a message and write it to the early console.
 *
 * Formats @fmt into a fixed 512-byte on-stack buffer (vscnprintf
 * truncates anything longer) and hands the result to the registered
 * early console driver. Silently does nothing before an early console
 * has been registered.
 */
asmlinkage void early_printk(const char *fmt, ...)
{
	char buf[512];
	int n;
	va_list ap;

	pax_track_stack();

	/*
	 * Fix: guard against a NULL early_console — calling this before
	 * setup_early_printk() registers a console would dereference NULL.
	 */
	if (!early_console)
		return;

	va_start(ap, fmt);
	n = vscnprintf(buf, sizeof(buf), fmt, ap);
	early_console->write(early_console, buf, n);
	va_end(ap);
}
/*
 * Raw SPI write: split @buf (at most WL1271_AGGR_BUFFER_SIZE bytes) into
 * chunks of up to WSPI_MAX_CHUNK_SIZE, prepend each chunk with a WSPI
 * write command word, and send everything as one synchronous SPI message.
 * @fixed selects fixed-address mode (the device address is not advanced
 * between chunks).
 */
static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
				 size_t len, bool fixed)
{
	/* Two transfers (command word + data) per chunk. */
	struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
	struct spi_message m;
	u32 commands[WSPI_MAX_NUM_OF_CHUNKS];	/* one command word per chunk */
	u32 *cmd;
	u32 chunk_len;
	int i;

	pax_track_stack();

	WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);

	spi_message_init(&m);
	memset(t, 0, sizeof(t));

	cmd = &commands[0];
	i = 0;
	while (len > 0) {
		chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);

		/* Build the WSPI command word for this chunk. */
		*cmd = 0;
		*cmd |= WSPI_CMD_WRITE;
		*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
			WSPI_CMD_BYTE_LENGTH;
		*cmd |= addr & WSPI_CMD_BYTE_ADDR;

		if (fixed)
			*cmd |= WSPI_CMD_FIXED;

		/* Command word transfer; cmd stays live in commands[] until
		 * spi_sync() below, so it must not be a loop-local. */
		t[i].tx_buf = cmd;
		t[i].len = sizeof(*cmd);
		spi_message_add_tail(&t[i++], &m);

		/* Payload transfer for this chunk. */
		t[i].tx_buf = buf;
		t[i].len = chunk_len;
		spi_message_add_tail(&t[i++], &m);

		wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
		wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);

		/* In fixed mode the same device address is rewritten. */
		if (!fixed)
			addr += chunk_len;
		buf += chunk_len;
		len -= chunk_len;
		cmd++;
	}

	/* One synchronous message carrying all chunks. */
	spi_sync(wl_to_spi(wl), &m);
}
/*
 * debugfs read: dump the software TX statistics counters, one
 * "name : value" row per counter.
 */
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	char buf[512];
	unsigned int len = 0;
	unsigned int i;
	const struct {
		const char *label;
		u32 val;
	} rows[] = {
		{ "Buffers queued",    priv->debug.tx_stats.buf_queued },
		{ "Buffers completed", priv->debug.tx_stats.buf_completed },
		{ "SKBs queued",       priv->debug.tx_stats.skb_queued },
		{ "SKBs success",      priv->debug.tx_stats.skb_success },
		{ "SKBs failed",       priv->debug.tx_stats.skb_failed },
		{ "CAB queued",        priv->debug.tx_stats.cab_queued },
		{ "BE queued",         priv->debug.tx_stats.queue_stats[WME_AC_BE] },
		{ "BK queued",         priv->debug.tx_stats.queue_stats[WME_AC_BK] },
		{ "VI queued",         priv->debug.tx_stats.queue_stats[WME_AC_VI] },
		{ "VO queued",         priv->debug.tx_stats.queue_stats[WME_AC_VO] },
	};

	pax_track_stack();

	for (i = 0; i < ARRAY_SIZE(rows); i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				"%20s : %10u\n", rows[i].label, rows[i].val);

	/* snprintf reports would-be length; clamp to the real buffer size */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
	/*
	 * Build an on-stack template containing every pv_*_ops structure
	 * laid out exactly as in struct paravirt_patch_template. @type is
	 * the pointer-sized word offset of the patched call site within
	 * that template, so indexing the template as a flat array of
	 * void* yields the target function pointer.
	 */
	const struct paravirt_patch_template tmpl = {
		.pv_init_ops = pv_init_ops,
		.pv_time_ops = pv_time_ops,
		.pv_cpu_ops = pv_cpu_ops,
		.pv_irq_ops = pv_irq_ops,
		.pv_apic_ops = pv_apic_ops,
		.pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
		.pv_lock_ops = pv_lock_ops,
#endif
	};

	pax_track_stack();

	/* Treat the template as an array of function pointers and index it. */
	return *((void **)&tmpl + type);
}
/*
 * debugfs read: report the depth of every TX endpoint queue plus the
 * locked queued-frame counter.
 */
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	char buf[512];
	unsigned int len = 0;
	unsigned int i;
	const struct {
		const char *label;
		struct sk_buff_head *q;
	} eps[] = {
		{ "Mgmt endpoint",    &priv->tx.mgmt_ep_queue },
		{ "Cab endpoint",     &priv->tx.cab_ep_queue },
		{ "Data BE endpoint", &priv->tx.data_be_queue },
		{ "Data BK endpoint", &priv->tx.data_bk_queue },
		{ "Data VI endpoint", &priv->tx.data_vi_queue },
		{ "Data VO endpoint", &priv->tx.data_vo_queue },
		{ "Failed queue",     &priv->tx.tx_failed },
	};

	pax_track_stack();

	for (i = 0; i < ARRAY_SIZE(eps); i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				"%20s : %10u\n", eps[i].label,
				skb_queue_len(eps[i].q));

	/* queued_cnt is only consistent under tx_lock */
	spin_lock_bh(&priv->tx.tx_lock);
	len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
			"Queued count", priv->tx.queued_cnt);
	spin_unlock_bh(&priv->tx.tx_lock);

	/* snprintf reports would-be length; clamp to the real buffer size */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * debugfs read: fetch target RX statistics over WMI and format them one
 * "name : value" row per counter. Returns -EINVAL if the WMI command
 * fails.
 */
static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	struct ath9k_htc_target_rx_stats cmd_rsp;
	char buf[512];
	unsigned int len = 0;
	int ret = 0;
	const char *labels[3];
	u32 vals[3];
	int i;

	pax_track_stack();

	memset(&cmd_rsp, 0, sizeof(cmd_rsp));

	ath9k_htc_ps_wakeup(priv);

	/* WMI_CMD expands against the local priv/cmd_rsp/ret names */
	WMI_CMD(WMI_RX_STATS_CMDID);
	if (ret) {
		ath9k_htc_ps_restore(priv);
		return -EINVAL;
	}

	ath9k_htc_ps_restore(priv);

	labels[0] = "NoBuf";    vals[0] = be32_to_cpu(cmd_rsp.nobuf);
	labels[1] = "HostSend"; vals[1] = be32_to_cpu(cmd_rsp.host_send);
	labels[2] = "HostDone"; vals[2] = be32_to_cpu(cmd_rsp.host_done);

	for (i = 0; i < 3; i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				"%20s : %10u\n", labels[i], vals[i]);

	/* snprintf reports would-be length; clamp to the real buffer size */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * debugfs read: dump the per-TID aggregation state (RX/TX active flags,
 * dialog tokens, SSN and pending-frame counts) for a station, under the
 * RCU read lock.
 */
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	char buf[71 + STA_TID_NUM * 40];
	char *p = buf;
	char *end = buf + sizeof(buf);
	int tid;
	struct sta_info *sta = file->private_data;
	struct tid_ampdu_rx *rx;
	struct tid_ampdu_tx *tx;

	pax_track_stack();

	rcu_read_lock();

	p += scnprintf(p, end - p, "next dialog_token: %#02x\n",
		       sta->ampdu_mlme.dialog_token_allocator + 1);
	p += scnprintf(p, end - p,
		       "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");

	for (tid = 0; tid < STA_TID_NUM; tid++) {
		rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
		tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);

		p += scnprintf(p, end - p, "%02d", tid);
		p += scnprintf(p, end - p, "\t\t%x", !!rx);
		p += scnprintf(p, end - p, "\t%#.2x",
			       rx ? rx->dialog_token : 0);
		p += scnprintf(p, end - p, "\t%#.3x", rx ? rx->ssn : 0);
		p += scnprintf(p, end - p, "\t\t%x", !!tx);
		p += scnprintf(p, end - p, "\t%#.2x",
			       tx ? tx->dialog_token : 0);
		p += scnprintf(p, end - p, "\t%03d",
			       tx ? skb_queue_len(&tx->pending) : 0);
		p += scnprintf(p, end - p, "\n");
	}
	rcu_read_unlock();

	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
/*
 * Sanity-check the IOMMU detection table [start, finish): break direct
 * dependency cycles between entries and report entries whose dependency
 * is ordered after them in the table.
 */
void __init check_iommu_entries(struct iommu_table_entry *start,
				struct iommu_table_entry *finish)
{
	struct iommu_table_entry *p, *q, *x;
	char sym_p[KSYM_SYMBOL_LEN];
	char sym_q[KSYM_SYMBOL_LEN];

	pax_track_stack();

	/* Simple cyclic dependency checker. */
	for (p = start; p < finish; p++) {
		q = find_dependents_of(start, finish, p);
		/*
		 * NOTE(review): q is passed on unchecked — presumably
		 * find_dependents_of() tolerates a NULL argument and returns
		 * NULL; confirm, since x == p implies both are non-NULL here.
		 */
		x = find_dependents_of(start, finish, q);
		if (p == x) {
			/* p depends on q and q depends on p: a 2-cycle. */
			sprint_symbol(sym_p, (unsigned long)p->detect);
			sprint_symbol(sym_q, (unsigned long)q->detect);

			printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \
					" on %s and vice-versa. BREAKING IT.\n",
				sym_p, sym_q);
			/* Heavy handed way..*/
			x->depend = 0;
		}
	}

	/* Report entries that appear before the entry they depend on. */
	for (p = start; p < finish; p++) {
		q = find_dependents_of(p, finish, p);
		if (q && q > p) {
			sprint_symbol(sym_p, (unsigned long)p->detect);
			sprint_symbol(sym_q, (unsigned long)q->detect);

			printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\
					"should be called before %s!\n",
				sym_p, sym_q);
		}
	}
}
/*
 * debugfs read: pretty-print a station's HT capabilities (cap bitfield,
 * SM power save mode, STBC, AMSDU length, A-MPDU parameters and MCS set).
 *
 * NOTE(review): the visible span ends right after the ht_supported block;
 * the trailing return/#undef presumably follow outside this extract.
 */
static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
/* Emit "\t<str>\n" when _cond holds; p/buf are the enclosing locals. */
#define PRINT_HT_CAP(_cond, _str) \
	do { \
	if (_cond) \
		p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
	} while (0)
	char buf[512], *p = buf;
	int i;
	struct sta_info *sta = file->private_data;
	struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;

	pax_track_stack();

	p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
			htc->ht_supported ? "" : "not ");
	if (htc->ht_supported) {
		p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);

		PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
		PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
		PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");

		/* Bits 2-3: SM power save mode (value 2 is reserved). */
		PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
		PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
		PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");

		PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
		PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
		PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
		PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");

		/* Bits 8-9: number of RX STBC streams. */
		PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
		PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
		PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
		PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");

		PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");

		PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
			     "3839 bytes");
		PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
			     "7935 bytes");

		/*
		 * For beacons and probe response this would mean the BSS
		 * does or does not allow the usage of DSSS/CCK HT40.
		 * Otherwise it means the STA does or does not use
		 * DSSS/CCK HT40.
		 */
		PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
		PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");

		/* BIT(13) is reserved */

		PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");

		PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");

		p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
				htc->ampdu_factor, htc->ampdu_density);
		p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");

		for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
			p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
					htc->mcs.rx_mask[i]);
		p += scnprintf(p, sizeof(buf)+buf-p, "\n");

		/* If not set this is meaningless */
		if (le16_to_cpu(htc->mcs.rx_highest)) {
			p += scnprintf(p, sizeof(buf)+buf-p,
				       "MCS rx highest: %d Mbps\n",
				       le16_to_cpu(htc->mcs.rx_highest));
		}

		p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
				htc->mcs.tx_params);
	}
/* * Send or receive packet. */ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, int msg_flags) { struct socket *sock = lo->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; pax_track_stack(); if (unlikely(!sock)) { printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n", lo->disk->disk_name, (send ? "send" : "recv")); return -EINVAL; } /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ siginitsetinv(&blocked, sigmask(SIGKILL)); sigprocmask(SIG_SETMASK, &blocked, &oldset); do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) { struct timer_list ti; if (lo->xmit_timeout) { init_timer(&ti); ti.function = nbd_xmit_timeout; ti.data = (unsigned long)current; ti.expires = jiffies + lo->xmit_timeout; add_timer(&ti); } result = kernel_sendmsg(sock, &msg, &iov, 1, size); if (lo->xmit_timeout) del_timer_sync(&ti); } else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); if (signal_pending(current)) { siginfo_t info; printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", task_pid_nr(current), current->comm, dequeue_signal_lock(current, ¤t->blocked, &info)); result = -EINTR; sock_shutdown(lo, !send); break; } if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); sigprocmask(SIG_SETMASK, &oldset, NULL); return result; }
/*
 * Fold the Virtual Unit Chain @thisVUC down to its newest Erase Unit:
 * copy every live 512-byte block into the head unit (targetEUN), then
 * erase every older unit in the chain, oldest first. The block at
 * @pendingblock is skipped because a write for it is in flight.
 *
 * Returns the surviving EUN, or BLOCK_NIL on error.
 */
static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC,
			   unsigned pendingblock)
{
	u16 BlockMap[MAX_SECTORS_PER_UNIT];	/* EUN holding live data per block */
	unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN, prevEUN, status;
	struct mtd_info *mtd = inftl->mbd.mtd;
	int block, silly;
	unsigned int targetEUN;
	struct inftl_oob oob;
	size_t retlen;

	pax_track_stack();

	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
		"pending=%d)\n", inftl, thisVUC, pendingblock);

	memset(BlockMap, 0xff, sizeof(BlockMap));
	memset(BlockDeleted, 0, sizeof(BlockDeleted));

	/* The chain head is also the fold target. */
	thisEUN = targetEUN = inftl->VUtable[thisVUC];
	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "INFTL: trying to fold non-existent "
		       "Virtual Unit Chain %d!\n", thisVUC);
		return BLOCK_NIL;
	}

	/*
	 * Scan to find the Erase Unit which holds the actual data for each
	 * 512-byte block within the Chain.
	 */
	silly = MAX_LOOPS;
	while (thisEUN < inftl->nb_blocks) {
		for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
			/* Newest occurrence wins: skip already-resolved blocks. */
			if ((BlockMap[block] != BLOCK_NIL) ||
			    BlockDeleted[block])
				continue;

			if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
					   + (block * SECTORSIZE), 16, &retlen,
					   (char *)&oob) < 0)
				status = SECTOR_IGNORE;
			else
				status = oob.b.Status | oob.b.Status1;

			switch(status) {
			case SECTOR_FREE:
			case SECTOR_IGNORE:
				break;
			case SECTOR_USED:
				BlockMap[block] = thisEUN;
				continue;
			case SECTOR_DELETED:
				BlockDeleted[block] = 1;
				continue;
			default:
				printk(KERN_WARNING "INFTL: unknown status "
					"for block %d in EUN %d: %x\n",
					block, thisEUN, status);
				break;
			}
		}

		/* Guard against a corrupted (looping) PUtable chain. */
		if (!silly--) {
			printk(KERN_WARNING "INFTL: infinite loop in Virtual "
				"Unit Chain 0x%x\n", thisVUC);
			return BLOCK_NIL;
		}

		thisEUN = inftl->PUtable[thisEUN];
	}

	/*
	 * OK. We now know the location of every block in the Virtual Unit
	 * Chain, and the Erase Unit into which we are supposed to be copying.
	 * Go for it.
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
	      thisVUC, targetEUN);

	for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
		unsigned char movebuf[SECTORSIZE];
		int ret;

		/*
		 * If it's in the target EUN already, or if it's pending write,
		 * do nothing.
		 */
		if (BlockMap[block] == targetEUN || (pendingblock ==
		    (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
			continue;
		}

		/*
		 * Copy only in non free block (free blocks can only
		 * happen in case of media errors or deleted blocks).
		 */
		if (BlockMap[block] == BLOCK_NIL)
			continue;

		ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
				(block * SECTORSIZE), SECTORSIZE, &retlen,
				movebuf);
		if (ret < 0 && ret != -EUCLEAN) {
			/* One retry on hard read error before giving up. */
			ret = mtd->read(mtd,
					(inftl->EraseSize * BlockMap[block]) +
					(block * SECTORSIZE), SECTORSIZE,
					&retlen, movebuf);
			if (ret != -EIO)
				DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
				      "away on retry?\n");
		}
		memset(&oob, 0xff, sizeof(struct inftl_oob));
		oob.b.Status = oob.b.Status1 = SECTOR_USED;

		inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
			    (block * SECTORSIZE), SECTORSIZE, &retlen,
			    movebuf, (char *)&oob);
	}

	/*
	 * Newest unit in chain now contains data from _all_ older units.
	 * So go through and erase each unit in chain, oldest first. (This
	 * is important, by doing oldest first if we crash/reboot then it
	 * it is relatively simple to clean up the mess).
	 */
	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
	      thisVUC);

	for (;;) {
		/* Find oldest unit in chain. */
		thisEUN = inftl->VUtable[thisVUC];
		prevEUN = BLOCK_NIL;
		while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
			prevEUN = thisEUN;
			thisEUN = inftl->PUtable[thisEUN];
		}

		/* Check if we are all done */
		if (thisEUN == targetEUN)
			break;

		/* Unlink the last block from the chain. */
		inftl->PUtable[prevEUN] = BLOCK_NIL;

		/* Now try to erase it. */
		if (INFTL_formatblock(inftl, thisEUN) < 0) {
			/*
			 * Could not erase : mark block as reserved.
			 */
			inftl->PUtable[thisEUN] = BLOCK_RESERVED;
		} else {
			/* Correctly erased : mark it as free */
			inftl->PUtable[thisEUN] = BLOCK_FREE;
			inftl->numfreeEUNs++;
		}
	}
	return targetEUN;
}
/*
 * Fold the Virtual Unit Chain @thisVUC: consolidate the live data of the
 * whole chain into a single Erase Unit (in place into the chain's last
 * unit when possible, otherwise into a freshly allocated unit), then
 * erase and free every other unit of the chain. The block at
 * @pendingblock is skipped because a write for it is in flight.
 *
 * Returns the surviving EUN, or BLOCK_NIL on error.
 */
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
{
	struct mtd_info *mtd = nftl->mbd.mtd;
	u16 BlockMap[MAX_SECTORS_PER_UNIT];	/* EUN holding live data per block */
	unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
	unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN;
	int block;
	int silly;
	unsigned int targetEUN;
	struct nftl_oob oob;
	int inplace = 1;
	size_t retlen;

	pax_track_stack();

	memset(BlockMap, 0xff, sizeof(BlockMap));
	memset(BlockFreeFound, 0, sizeof(BlockFreeFound));

	thisEUN = nftl->EUNtable[thisVUC];

	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "Trying to fold non-existent "
		       "Virtual Unit Chain %d!\n", thisVUC);
		return BLOCK_NIL;
	}

	/* Scan to find the Erase Unit which holds the actual data for each
	   512-byte block within the Chain.
	 */
	silly = MAX_LOOPS;
	targetEUN = BLOCK_NIL;
	while (thisEUN <= nftl->lastEUN ) {
		unsigned int status, foldmark;

		/* The last unit visited becomes the in-place fold target. */
		targetEUN = thisEUN;
		for (block = 0; block < nftl->EraseSize / 512; block ++) {
			nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
				      (block * 512), 16 , &retlen,
				      (char *)&oob);
			if (block == 2) {
				/* Block 2's OOB carries the fold mark. */
				foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
				if (foldmark == FOLD_MARK_IN_PROGRESS) {
					DEBUG(MTD_DEBUG_LEVEL1,
					      "Write Inhibited on EUN %d\n", thisEUN);
					inplace = 0;
				} else {
					/* There's no other reason not to do inplace,
					   except ones that come later. So we don't need
					   to preserve inplace */
					inplace = 1;
				}
			}
			status = oob.b.Status | oob.b.Status1;
			/*
			 * NOTE(review): BlockLastState is written only here
			 * and read in the inplace checks below — presumably
			 * the chain scan always runs at least once after the
			 * BLOCK_NIL check above; confirm it cannot be read
			 * uninitialized.
			 */
			BlockLastState[block] = status;

			switch(status) {
			case SECTOR_FREE:
				BlockFreeFound[block] = 1;
				break;

			case SECTOR_USED:
				if (!BlockFreeFound[block])
					BlockMap[block] = thisEUN;
				else
					printk(KERN_WARNING
					       "SECTOR_USED found after SECTOR_FREE "
					       "in Virtual Unit Chain %d for block %d\n",
					       thisVUC, block);
				break;
			case SECTOR_DELETED:
				if (!BlockFreeFound[block])
					BlockMap[block] = BLOCK_NIL;
				else
					printk(KERN_WARNING
					       "SECTOR_DELETED found after SECTOR_FREE "
					       "in Virtual Unit Chain %d for block %d\n",
					       thisVUC, block);
				break;

			case SECTOR_IGNORE:
				break;
			default:
				printk("Unknown status for block %d in EUN %d: %x\n",
				       block, thisEUN, status);
			}
		}

		/* Guard against a corrupted (looping) ReplUnitTable chain. */
		if (!silly--) {
			printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n",
			       thisVUC);
			return BLOCK_NIL;
		}

		thisEUN = nftl->ReplUnitTable[thisEUN];
	}

	if (inplace) {
		/* We're being asked to be a fold-in-place. Check
		   that all blocks which actually have data associated
		   with them (i.e. BlockMap[block] != BLOCK_NIL) are
		   either already present or SECTOR_FREE in the target
		   block. If not, we're going to have to fold out-of-place
		   anyway.
		*/
		for (block = 0; block < nftl->EraseSize / 512 ; block++) {
			if (BlockLastState[block] != SECTOR_FREE &&
			    BlockMap[block] != BLOCK_NIL &&
			    BlockMap[block] != targetEUN) {
				DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
				      "block %d was %x lastEUN, "
				      "and is in EUN %d (%s) %d\n",
				      thisVUC, block, BlockLastState[block],
				      BlockMap[block],
				      BlockMap[block]== targetEUN ? "==" : "!=",
				      targetEUN);
				inplace = 0;
				break;
			}
		}

		/* A pending write into a non-free target sector also forbids
		   folding in place. */
		if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) &&
		    pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
		    BlockLastState[pendingblock -
				   (thisVUC * (nftl->EraseSize / 512))] !=
		    SECTOR_FREE) {
			DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. "
			      "Folding out of place.\n", targetEUN);
			inplace = 0;
		}
	}

	if (!inplace) {
		DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
		      "Trying out-of-place\n", thisVUC);
		/* We need to find a targetEUN to fold into. */
		targetEUN = NFTL_findfreeblock(nftl, 1);
		if (targetEUN == BLOCK_NIL) {
			/* Ouch. Now we're screwed. We need to do a
			   fold-in-place of another chain to make room
			   for this one. We need a better way of selecting
			   which chain to fold, because makefreeblock will
			   only ask us to fold the same one again. But
			   it's a start.
			*/
			printk(KERN_WARNING
			       "NFTL_findfreeblock(desperate) returns 0xffff.\n");
			return BLOCK_NIL;
		}
	} else {
		/* We put a fold mark in the chain we are folding only if we
               fold in place to help the mount check code. If we do not fold in
               place, it is possible to find the valid chain by selecting the
               longer one */
		oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
		oob.u.c.unused = 0xffffffff;
		nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
			       8, &retlen, (char *)&oob.u);
	}

	/* OK. We now know the location of every block in the Virtual Unit Chain,
	   and the Erase Unit into which we are supposed to be copying.
	   Go for it.
	*/
	DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN);
	for (block = 0; block < nftl->EraseSize / 512 ; block++) {
		unsigned char movebuf[512];
		int ret;

		/* If it's in the target EUN already, or if it's pending write, do nothing */
		if (BlockMap[block] == targetEUN ||
		    (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
			continue;
		}

		/* copy only in non free block (free blocks can only
                   happen in case of media errors or deleted blocks) */
		if (BlockMap[block] == BLOCK_NIL)
			continue;

		ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) +
				(block * 512), 512, &retlen, movebuf);
		if (ret < 0 && ret != -EUCLEAN) {
			/* One retry on hard read error before giving up. */
			ret = mtd->read(mtd,
					(nftl->EraseSize * BlockMap[block]) +
					(block * 512), 512, &retlen, movebuf);
			if (ret != -EIO)
				printk("Error went away on retry.\n");
		}
		memset(&oob, 0xff, sizeof(struct nftl_oob));
		oob.b.Status = oob.b.Status1 = SECTOR_USED;

		nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
			   (block * 512), 512, &retlen, movebuf, (char *)&oob);
	}

	/* add the header so that it is now a valid chain */
	oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
	oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;

	nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
		       8, &retlen, (char *)&oob.u);

	/* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */

	/* At this point, we have two different chains for this Virtual Unit, and no way to tell
	   them apart. If we crash now, we get confused. However, both contain the same data, so we
	   shouldn't actually lose data in this case. It's just that when we load up on a medium which
	   has duplicate chains, we need to free one of the chains because it's not necessary any more.
	*/
	thisEUN = nftl->EUNtable[thisVUC];
	DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");

	/* For each block in the old chain (except the targetEUN of course),
	   free it and make it available for future use */
	while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
		unsigned int EUNtmp;

		EUNtmp = nftl->ReplUnitTable[thisEUN];

		if (NFTL_formatblock(nftl, thisEUN) < 0) {
			/* could not erase : mark block as reserved
			 */
			nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
		} else {
			/* correctly erased : mark it as free */
			nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
			nftl->numfreeEUNs++;
		}
		thisEUN = EUNtmp;
	}

	/* Make this the new start of chain for thisVUC */
	nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
	nftl->EUNtable[thisVUC] = targetEUN;

	return targetEUN;
}
/*
 * debugfs read: fetch target TX statistics over WMI and format them one
 * "name : value" row per counter. Returns -EINVAL if the WMI command
 * fails.
 */
static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	struct ath9k_htc_target_tx_stats cmd_rsp;
	char buf[512];
	unsigned int len = 0;
	int ret = 0;
	const char *labels[9];
	u32 vals[9];
	int i;

	pax_track_stack();

	memset(&cmd_rsp, 0, sizeof(cmd_rsp));

	ath9k_htc_ps_wakeup(priv);

	/* WMI_CMD expands against the local priv/cmd_rsp/ret names */
	WMI_CMD(WMI_TX_STATS_CMDID);
	if (ret) {
		ath9k_htc_ps_restore(priv);
		return -EINVAL;
	}

	ath9k_htc_ps_restore(priv);

	labels[0] = "Xretries";     vals[0] = be32_to_cpu(cmd_rsp.xretries);
	labels[1] = "FifoErr";      vals[1] = be32_to_cpu(cmd_rsp.fifoerr);
	labels[2] = "Filtered";     vals[2] = be32_to_cpu(cmd_rsp.filtered);
	labels[3] = "TimerExp";     vals[3] = be32_to_cpu(cmd_rsp.timer_exp);
	labels[4] = "ShortRetries"; vals[4] = be32_to_cpu(cmd_rsp.shortretries);
	labels[5] = "LongRetries";  vals[5] = be32_to_cpu(cmd_rsp.longretries);
	labels[6] = "QueueNull";    vals[6] = be32_to_cpu(cmd_rsp.qnull);
	labels[7] = "EncapFail";    vals[7] = be32_to_cpu(cmd_rsp.encap_fail);
	labels[8] = "NoBuf";        vals[8] = be32_to_cpu(cmd_rsp.nobuf);

	for (i = 0; i < 9; i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				"%20s : %10u\n", labels[i], vals[i]);

	/* snprintf reports would-be length; clamp to the real buffer size */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * process, that is going to call fix_nodes/do_balance must hold only
 * one path. If it holds 2 or more, it can get into endless waiting in
 * get_empty_nodes or its clones
 */
/*
 * reiserfs_rename - move @old_dentry from @old_dir to @new_dentry in @new_dir.
 *
 * Runs under reiserfs_write_lock() and a single journal transaction.
 * The dance is: pre-validate the old name (and ".." for directories),
 * insert/find the new entry, then in a retry loop re-locate all affected
 * leaf items, journal-prepare their buffers, and re-validate that nothing
 * was shifted by a concurrent balance; only when everything is pinned are
 * the directory entries flipped in "one fell swoop".
 *
 * Returns 0 on success or a negative errno (-EIO on tree lookup
 * inconsistencies, -ENOENT / -ENOTEMPTY / journal errors as appropriate).
 */
static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	int retval;
	INITIALIZE_PATH(old_entry_path);
	INITIALIZE_PATH(new_entry_path);
	INITIALIZE_PATH(dot_dot_entry_path);
	struct item_head new_entry_ih, old_entry_ih, dot_dot_ih;
	struct reiserfs_dir_entry old_de, new_de, dot_dot_de;
	struct inode *old_inode, *new_dentry_inode;
	struct reiserfs_transaction_handle th;
	int jbegin_count;
	umode_t old_inode_mode;
	/* nlink of the victim after the rename; 1 means "no save link needed" */
	unsigned long savelink = 1;
	struct timespec ctime;

	pax_track_stack();

	/* three balancings: (1) old name removal, (2) new name insertion
	   and (3) maybe "save" link insertion
	   stat data updates: (1) old directory,
	   (2) new directory and (3) maybe old object stat data (when it is
	   directory) and (4) maybe stat data of object to which new entry
	   pointed initially and (5) maybe block containing ".." of
	   renamed directory
	   quota updates: two parent directories */
	jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 5 +
	    4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);

	dquot_initialize(old_dir);
	dquot_initialize(new_dir);

	old_inode = old_dentry->d_inode;
	new_dentry_inode = new_dentry->d_inode;

	// make sure, that oldname still exists and points to an object we
	// are going to rename
	old_de.de_gen_number_bit_string = NULL;
	reiserfs_write_lock(old_dir->i_sb);
	retval = reiserfs_find_entry(old_dir, old_dentry->d_name.name,
				     old_dentry->d_name.len, &old_entry_path,
				     &old_de);
	pathrelse(&old_entry_path);
	if (retval == IO_ERROR) {
		reiserfs_write_unlock(old_dir->i_sb);
		return -EIO;
	}

	if (retval != NAME_FOUND || old_de.de_objectid != old_inode->i_ino) {
		reiserfs_write_unlock(old_dir->i_sb);
		return -ENOENT;
	}

	old_inode_mode = old_inode->i_mode;
	if (S_ISDIR(old_inode_mode)) {
		// make sure, that directory being renamed has correct ".."
		// and that its new parent directory has not too many links
		// already
		if (new_dentry_inode) {
			if (!reiserfs_empty_dir(new_dentry_inode)) {
				reiserfs_write_unlock(old_dir->i_sb);
				return -ENOTEMPTY;
			}
		}

		/* directory is renamed, its parent directory will be changed,
		 ** so find ".." entry
		 */
		dot_dot_de.de_gen_number_bit_string = NULL;
		retval =
		    reiserfs_find_entry(old_inode, "..", 2, &dot_dot_entry_path,
					&dot_dot_de);
		pathrelse(&dot_dot_entry_path);
		if (retval != NAME_FOUND) {
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}

		/* inode number of .. must equal old_dir->i_ino */
		if (dot_dot_de.de_objectid != old_dir->i_ino) {
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}
	}

	retval = journal_begin(&th, old_dir->i_sb, jbegin_count);
	if (retval) {
		reiserfs_write_unlock(old_dir->i_sb);
		return retval;
	}

	/* add new entry (or find the existing one) */
	retval =
	    reiserfs_add_entry(&th, new_dir, new_dentry->d_name.name,
			       new_dentry->d_name.len, old_inode, 0);
	if (retval == -EEXIST) {
		/* -EEXIST is expected when the target name already exists;
		   it must then correspond to a real victim inode */
		if (!new_dentry_inode) {
			reiserfs_panic(old_dir->i_sb, "vs-7050",
				       "new entry is found, new inode == 0");
		}
	} else if (retval) {
		int err = journal_end(&th, old_dir->i_sb, jbegin_count);
		reiserfs_write_unlock(old_dir->i_sb);
		return err ? err : retval;
	}

	reiserfs_update_inode_transaction(old_dir);
	reiserfs_update_inode_transaction(new_dir);

	/* this makes it so an fsync on an open fd for the old name will
	 ** commit the rename operation
	 */
	reiserfs_update_inode_transaction(old_inode);

	if (new_dentry_inode)
		reiserfs_update_inode_transaction(new_dentry_inode);

	/* Retry loop: locate every affected entry, journal-prepare its
	   buffer, then re-check nothing moved under us; restart on a race. */
	while (1) {
		// look for old name using corresponding entry key (found by reiserfs_find_entry)
		if ((retval =
		     search_by_entry_key(new_dir->i_sb, &old_de.de_entry_key,
					 &old_entry_path,
					 &old_de)) != NAME_FOUND) {
			pathrelse(&old_entry_path);
			journal_end(&th, old_dir->i_sb, jbegin_count);
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}

		/* snapshot the item head so we can detect later movement */
		copy_item_head(&old_entry_ih, get_ih(&old_entry_path));

		reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1);

		// look for new name by reiserfs_find_entry
		new_de.de_gen_number_bit_string = NULL;
		retval = reiserfs_find_entry(new_dir, new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     &new_entry_path, &new_de);
		// reiserfs_add_entry should not return IO_ERROR, because it is called with essentially same parameters from
		// reiserfs_add_entry above, and we'll catch any i/o errors before we get here.
		if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND) {
			pathrelse(&new_entry_path);
			pathrelse(&old_entry_path);
			journal_end(&th, old_dir->i_sb, jbegin_count);
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}

		copy_item_head(&new_entry_ih, get_ih(&new_entry_path));

		reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1);

		if (S_ISDIR(old_inode->i_mode)) {
			if ((retval =
			     search_by_entry_key(new_dir->i_sb,
						 &dot_dot_de.de_entry_key,
						 &dot_dot_entry_path,
						 &dot_dot_de)) != NAME_FOUND) {
				pathrelse(&dot_dot_entry_path);
				pathrelse(&new_entry_path);
				pathrelse(&old_entry_path);
				journal_end(&th, old_dir->i_sb, jbegin_count);
				reiserfs_write_unlock(old_dir->i_sb);
				return -EIO;
			}
			copy_item_head(&dot_dot_ih,
				       get_ih(&dot_dot_entry_path));
			// node containing ".." gets into transaction
			reiserfs_prepare_for_journal(old_inode->i_sb,
						     dot_dot_de.de_bh, 1);
		}
		/* we should check seals here, not do
		   this stuff, yes? Then, having
		   gathered everything into RAM we
		   should lock the buffers, yes?  -Hans */
		/* probably.  our rename needs to hold more
		 ** than one path at once.  The seals would
		 ** have to be written to deal with multi-path
		 ** issues -chris
		 */
		/* sanity checking before doing the rename - avoid races many
		 ** of the above checks could have scheduled.  We have to be
		 ** sure our items haven't been shifted by another process.
		 */
		if (item_moved(&new_entry_ih, &new_entry_path) ||
		    !entry_points_to_object(new_dentry->d_name.name,
					    new_dentry->d_name.len,
					    &new_de, new_dentry_inode) ||
		    item_moved(&old_entry_ih, &old_entry_path) ||
		    !entry_points_to_object(old_dentry->d_name.name,
					    old_dentry->d_name.len,
					    &old_de, old_inode)) {
			/* raced with a balance: unprepare and retry */
			reiserfs_restore_prepared_buffer(old_inode->i_sb,
							 new_de.de_bh);
			reiserfs_restore_prepared_buffer(old_inode->i_sb,
							 old_de.de_bh);
			if (S_ISDIR(old_inode_mode))
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 dot_dot_de.
								 de_bh);
			continue;
		}
		if (S_ISDIR(old_inode_mode)) {
			if (item_moved(&dot_dot_ih, &dot_dot_entry_path) ||
			    !entry_points_to_object("..", 2, &dot_dot_de,
						    old_dir)) {
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 old_de.de_bh);
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 new_de.de_bh);
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 dot_dot_de.
								 de_bh);
				continue;
			}
		}

		RFALSE(S_ISDIR(old_inode_mode) &&
		       !buffer_journal_prepared(dot_dot_de.de_bh), "");

		break;
	}

	/* ok, all the changes can be done in one fell swoop when we
	   have claimed all the buffers needed. */

	mark_de_visible(new_de.de_deh + new_de.de_entry_num);
	set_ino_in_dir_entry(&new_de, INODE_PKEY(old_inode));
	journal_mark_dirty(&th, old_dir->i_sb, new_de.de_bh);

	mark_de_hidden(old_de.de_deh + old_de.de_entry_num);
	journal_mark_dirty(&th, old_dir->i_sb, old_de.de_bh);
	ctime = CURRENT_TIME_SEC;
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	/* thanks to Alex Adriaanse <*****@*****.**> for patch which adds ctime update of
	   renamed object */
	old_inode->i_ctime = ctime;

	if (new_dentry_inode) {
		// adjust link number of the victim
		if (S_ISDIR(new_dentry_inode->i_mode)) {
			clear_nlink(new_dentry_inode);
		} else {
			drop_nlink(new_dentry_inode);
		}
		new_dentry_inode->i_ctime = ctime;
		savelink = new_dentry_inode->i_nlink;
	}

	if (S_ISDIR(old_inode_mode)) {
		/* adjust ".." of renamed directory */
		set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
		journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);

		if (!new_dentry_inode)
			/* there (in new_dir) was no directory, so it got new link
			   (".."  of renamed directory) */
			INC_DIR_INODE_NLINK(new_dir);

		/* old directory lost one link - ".. " of renamed directory */
		DEC_DIR_INODE_NLINK(old_dir);
	}
	// looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
	pathrelse(&new_entry_path);
	pathrelse(&dot_dot_entry_path);

	// FIXME: this reiserfs_cut_from_item's return value may screw up
	// anybody, but it will panic if will not be able to find the
	// entry. This needs one more clean up
	if (reiserfs_cut_from_item
	    (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL,
	     0) < 0)
		reiserfs_error(old_dir->i_sb, "vs-7060",
			       "couldn't not cut old name. Fsck later?");

	old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;

	reiserfs_update_sd(&th, old_dir);
	reiserfs_update_sd(&th, new_dir);
	reiserfs_update_sd(&th, old_inode);

	if (new_dentry_inode) {
		/* victim's last link went away inside this transaction:
		   record a save link so fsck/replay can reclaim it */
		if (savelink == 0)
			add_save_link(&th, new_dentry_inode,
				      0 /* not truncate */ );
		reiserfs_update_sd(&th, new_dentry_inode);
	}

	retval = journal_end(&th, old_dir->i_sb, jbegin_count);
	reiserfs_write_unlock(old_dir->i_sb);
	return retval;
}
/*
 * Determine if a media is present in the floppy drive, and if so, its LBA
 * capacity.
 */
/*
 * Issues an ATAPI READ FORMAT CAPACITIES-style command (built by
 * ide_floppy_create_read_capacity_cmd()) and parses the returned capacity
 * descriptor list.  Only the first descriptor sets the drive geometry; the
 * rest are merely logged.  On success the cached cap_desc, block geometry
 * and drive->capacity64 are updated.
 *
 * Returns 0 when a usable capacity was found, 1 otherwise.
 */
static int ide_floppy_get_capacity(ide_drive_t *drive)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	struct gendisk *disk = floppy->disk;
	struct ide_atapi_pc pc;
	u8 *cap_desc;
	u8 pc_buf[256], header_len, desc_cnt;
	int i, rc = 1, blocks, length;

	pax_track_stack();

	ide_debug_log(IDE_DBG_FUNC, "enter");

	/* reset all cached geometry before probing */
	drive->bios_cyl = 0;
	drive->bios_head = drive->bios_sect = 0;
	floppy->blocks = 0;
	floppy->bs_factor = 1;
	drive->capacity64 = 0;

	ide_floppy_create_read_capacity_cmd(&pc);
	if (ide_queue_pc_tail(drive, disk, &pc, pc_buf, pc.req_xfer)) {
		printk(KERN_ERR PFX "Can't get floppy parameters\n");
		return 1;
	}
	/* response: 4-byte header (length at offset 3), then descriptors */
	header_len = pc_buf[3];
	cap_desc = &pc_buf[4];
	desc_cnt = header_len / 8;	/* capacity descriptor of 8 bytes */

	for (i = 0; i < desc_cnt; i++) {
		unsigned int desc_start = 4 + i*8;

		/* big-endian on the wire: 4-byte block count, 2-byte
		   block length at descriptor offsets 0 and 6 */
		blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]);
		length = be16_to_cpup((__be16 *)&pc_buf[desc_start + 6]);

		ide_debug_log(IDE_DBG_PROBE, "Descriptor %d: %dkB, %d blocks, "
					     "%d sector size",
					     i, blocks * length / 1024,
					     blocks, length);

		if (i)
			continue;
		/*
		 * the code below is valid only for the 1st descriptor, ie i=0
		 */

		/* low two bits of byte 4 are the descriptor type code */
		switch (pc_buf[desc_start + 4] & 0x03) {
		/* Clik! drive returns this instead of CAPACITY_CURRENT */
		case CAPACITY_UNFORMATTED:
			if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
				/*
				 * If it is not a clik drive, break out
				 * (maintains previous driver behaviour)
				 */
				break;
			/* fall through: Clik! media is treated as current */
		case CAPACITY_CURRENT:
			/* Normal Zip/LS-120 disks */
			if (memcmp(cap_desc, &floppy->cap_desc, 8))
				printk(KERN_INFO PFX "%s: %dkB, %d blocks, %d "
				       "sector size\n",
				       drive->name, blocks * length / 1024,
				       blocks, length);
			/* cache descriptor so the printk above only fires
			   when the medium actually changed */
			memcpy(&floppy->cap_desc, cap_desc, 8);

			if (!length || length % 512) {
				printk(KERN_NOTICE PFX "%s: %d bytes block size"
				       " not supported\n", drive->name, length);
			} else {
				floppy->blocks = blocks;
				floppy->block_size = length;
				/* 512-byte sectors per device block */
				floppy->bs_factor = length / 512;
				if (floppy->bs_factor != 1)
					printk(KERN_NOTICE PFX "%s: Warning: "
					       "non 512 bytes block size not "
					       "fully supported\n",
					       drive->name);
				drive->capacity64 =
					floppy->blocks * floppy->bs_factor;
				rc = 0;
			}
			break;
		case CAPACITY_NO_CARTRIDGE:
			/*
			 * This is a KERN_ERR so it appears on screen
			 * for the user to see
			 */
			printk(KERN_ERR PFX "%s: No disk in drive\n",
			       drive->name);
			break;
		case CAPACITY_INVALID:
			printk(KERN_ERR PFX "%s: Invalid capacity for disk "
			       "in drive\n", drive->name);
			break;
		}
		ide_debug_log(IDE_DBG_PROBE, "Descriptor 0 Code: %d",
					     pc_buf[desc_start + 4] & 0x03);
	}

	/* Clik! disk does not support get_flexible_disk_page */
	if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
		(void) ide_floppy_get_flexible_disk_page(drive, &pc);

	return rc;
}