/*
 * Put character routine for 3215 devices
 *
 * Translates one ASCII character to EBCDIC and appends it to the
 * device's output ring buffer, then kicks off I/O if none is running.
 */
static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
{
	unsigned long flags;
	unsigned int length, i;

	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
	if (ch == '\t') {
		/* Expand a tab into spaces up to the next tab stop. */
		length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
		raw->line_pos += length;
		ch = ' ';
	} else if (ch == '\n') {
		length = 1;
		raw->line_pos = 0;	/* newline resets the column position */
	} else {
		length = 1;
		raw->line_pos++;
	}
	/*
	 * NOTE: raw3215_make_room() may temporarily release and re-take
	 * the ccw lock while it waits for buffer space.
	 */
	raw3215_make_room(raw, length);
	for (i = 0; i < length; i++) {
		raw->buffer[raw->head] = (char) _ascebc[(int) ch];
		/* ring-buffer wrap; RAW3215_BUFFER_SIZE must be a power of 2 */
		raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
		raw->count++;
	}
	if (!(raw->flags & RAW3215_WORKING)) {
		raw3215_mk_write_req(raw);
		/* start or queue request */
		raw3215_try_io(raw);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
 * Wait until length bytes are available in the output buffer.
 * Has to be called with the s390irq lock held. Can be called
 * disabled.
 */
static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
{
	while (RAW3215_BUFFER_SIZE - raw->count < length) {
		/* While console is frozen for suspend we have no other
		 * choice but to drop message from the buffer to make
		 * room for even more messages. */
		if (raw->port.flags & ASYNC_SUSPENDED) {
			raw3215_drop_line(raw);
			continue;
		}
		/* there might be a request pending */
		raw->flags |= RAW3215_FLUSHING;
		raw3215_mk_write_req(raw);
		raw3215_try_io(raw);
		raw->flags &= ~RAW3215_FLUSHING;
#ifdef CONFIG_TN3215_CONSOLE
		/* Console path: wait for the device to go idle — presumably
		 * this drains the running channel program; confirm against
		 * ccw_device_wait_idle() semantics. */
		ccw_device_wait_idle(raw->cdev);
#endif
		/* Enough room freed up ? */
		if (RAW3215_BUFFER_SIZE - raw->count >= length)
			break;
		/* there might be another cpu waiting for the lock */
		spin_unlock(get_ccwdev_lock(raw->cdev));
		udelay(100);
		spin_lock(get_ccwdev_lock(raw->cdev));
	}
}
/*
 * Shutdown a 3215 device.
 *
 * Waits for any queued or in-flight requests to finish, then clears
 * the port's initialized state. The fixed console device is never
 * shut down (RAW3215_FIXED).
 */
static void raw3215_shutdown(struct raw3215_info *raw)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	/* Nothing to do if never initialized, or for the fixed console. */
	if (!(raw->port.flags & ASYNC_INITIALIZED) ||
	    (raw->flags & RAW3215_FIXED))
		return;
	/* Wait for outstanding requests, then free irq */
	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
	if ((raw->flags & RAW3215_WORKING) ||
	    raw->queued_write != NULL ||
	    raw->queued_read != NULL) {
		raw->port.flags |= ASYNC_CLOSING;
		add_wait_queue(&raw->empty_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		/* Sleep without the lock so completion can make progress;
		 * woken via empty_wait — presumably signalled from the
		 * interrupt path; confirm against raw3215_irq. */
		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
		schedule();
		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
		remove_wait_queue(&raw->empty_wait, &wait);
		set_current_state(TASK_RUNNING);
		raw->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
 * panic() calls console_unblank before the system enters a
 * disabled, endless loop. Drain the whole output buffer so the
 * panic message gets out.
 */
static void con3215_unblank(void)
{
	struct raw3215_info *console_raw;
	unsigned long irqflags;

	/* The 3215 console is always the first device slot. */
	console_raw = raw3215[0];
	spin_lock_irqsave(get_ccwdev_lock(console_raw->cdev), irqflags);
	/* Requesting the full buffer size forces a complete drain. */
	raw3215_make_room(console_raw, RAW3215_BUFFER_SIZE);
	spin_unlock_irqrestore(get_ccwdev_lock(console_raw->cdev), irqflags);
}
/*
 * Flush routine, it simply sets the flush flag and tries to start
 * pending IO.
 */
static void raw3215_flush_buffer(struct raw3215_info *raw)
{
	unsigned long irqflags;

	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), irqflags);
	if (raw->count > 0) {
		/* Mark the device as flushing for the duration of try_io. */
		raw->flags |= RAW3215_FLUSHING;
		raw3215_try_io(raw);
		raw->flags &= ~RAW3215_FLUSHING;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), irqflags);
}
/*
 * Enable writing to a 3215 tty.
 * Clears the stopped flag and restarts pending I/O.
 */
static void tty3215_start(struct tty_struct *tty)
{
	struct raw3215_info *info = tty->driver_data;
	unsigned long irqflags;

	if (!(info->flags & RAW3215_STOPPED))
		return;
	spin_lock_irqsave(get_ccwdev_lock(info->cdev), irqflags);
	info->flags &= ~RAW3215_STOPPED;
	raw3215_try_io(info);
	spin_unlock_irqrestore(get_ccwdev_lock(info->cdev), irqflags);
}
/*
 * Power-management freeze callback: drain the output buffer, then
 * mark the port suspended so no further I/O is started.
 */
static int raw3215_pm_stop(struct ccw_device *cdev)
{
	struct raw3215_info *info = dev_get_drvdata(&cdev->dev);
	unsigned long irqflags;

	/* Empty the output buffer, then prevent new I/O. */
	spin_lock_irqsave(get_ccwdev_lock(info->cdev), irqflags);
	raw3215_make_room(info, RAW3215_BUFFER_SIZE);
	info->port.flags |= ASYNC_SUSPENDED;
	spin_unlock_irqrestore(get_ccwdev_lock(info->cdev), irqflags);
	return 0;
}
/*
 * Fire up a 3215 device.
 * Idempotent: returns immediately if the port is already initialized.
 */
static int raw3215_startup(struct raw3215_info *raw)
{
	unsigned long irqflags;

	if (tty_port_initialized(&raw->port))
		return 0;

	raw->line_pos = 0;
	tty_port_set_initialized(&raw->port, 1);
	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), irqflags);
	raw3215_try_io(raw);
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), irqflags);
	return 0;
}
/*
 * Fire up a 3215 device.
 * Idempotent: returns immediately if the device is already active.
 */
static int raw3215_startup(struct raw3215_info *raw)
{
	unsigned long irqflags;

	if (raw->flags & RAW3215_ACTIVE)
		return 0;

	raw->line_pos = 0;
	raw->flags |= RAW3215_ACTIVE;
	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), irqflags);
	raw3215_try_io(raw);
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), irqflags);
	return 0;
}
/*
 * Fire up a 3215 device.
 * Idempotent: returns immediately if the port is already initialized.
 */
static int raw3215_startup(struct raw3215_info *raw)
{
	unsigned long irqflags;

	if (raw->port.flags & ASYNC_INITIALIZED)
		return 0;

	raw->line_pos = 0;
	raw->port.flags |= ASYNC_INITIALIZED;
	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), irqflags);
	raw3215_try_io(raw);
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), irqflags);
	return 0;
}
/* * Quiesce device. */ static int dasd_ioctl_quiesce(struct dasd_block *block) { unsigned long flags; struct dasd_device *base; base = block->base; if (!capable (CAP_SYS_ADMIN)) return -EACCES; DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device"); spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); base->stopped |= DASD_STOPPED_QUIESCE; spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); return 0; }
/*
 * Function to start a delayed output after RAW3215_TIMEOUT seconds.
 * Timer callback: if the timer flag is still set, stop the timer and
 * kick off the queued write request.
 */
static void raw3215_timeout(unsigned long __data)
{
	struct raw3215_info *info = (struct raw3215_info *) __data;
	unsigned long irqflags;

	spin_lock_irqsave(get_ccwdev_lock(info->cdev), irqflags);
	if (info->flags & RAW3215_TIMER_RUNS) {
		del_timer(&info->timer);
		info->flags &= ~RAW3215_TIMER_RUNS;
		raw3215_mk_write_req(info);
		raw3215_start_io(info);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(info->cdev), irqflags);
}
/*
 * Power-management thaw callback: allow I/O again and flush the
 * accumulated output buffer.
 */
static int raw3215_pm_start(struct ccw_device *cdev)
{
	struct raw3215_info *info = dev_get_drvdata(&cdev->dev);
	unsigned long irqflags;

	/* Allow I/O again and flush output buffer. */
	spin_lock_irqsave(get_ccwdev_lock(info->cdev), irqflags);
	info->port.flags &= ~ASYNC_SUSPENDED;
	info->flags |= RAW3215_FLUSHING;
	raw3215_try_io(info);
	info->flags &= ~RAW3215_FLUSHING;
	spin_unlock_irqrestore(get_ccwdev_lock(info->cdev), irqflags);
	return 0;
}
/* * panic() calls con3215_flush through a panic_notifier * before the system enters a disabled, endless loop. */ static void con3215_flush(void) { struct raw3215_info *raw; unsigned long flags; raw = raw3215[0]; /* console 3215 is the first one */ if (raw->port.flags & ASYNC_SUSPENDED) /* The console is still frozen for suspend. */ if (ccw_device_force_console(raw->cdev)) /* Forcing didn't work, no panic message .. */ return; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_make_room(raw, RAW3215_BUFFER_SIZE); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); }
/* * Quiesce device. */ static int dasd_ioctl_quiesce(struct dasd_block *block) { unsigned long flags; struct dasd_device *base; base = block->base; if (!capable (CAP_SYS_ADMIN)) return -EACCES; pr_info("%s: The DASD has been put in the quiesce " "state\n", dev_name(&base->cdev->dev)); spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); base->stopped |= DASD_STOPPED_QUIESCE; spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); return 0; }
/*
 * 3215 console initialization code called from console_init().
 * NOTE: This is called before kmalloc is available.
 */
static int __init con3215_init(void)
{
	struct ccw_device *cdev;
	struct raw3215_info *raw;
	struct raw3215_req *req;
	int i;

	/* Check if 3215 is to be the console */
	if (!CONSOLE_IS_3215)
		return -ENODEV;
	/* Set the console mode for VM */
	if (MACHINE_IS_VM) {
		cpcmd("TERM CONMODE 3215", NULL, 0);
		cpcmd("TERM AUTOCR OFF", NULL, 0);
	}
	/* allocate 3215 request structures (bootmem: kmalloc not up yet) */
	raw3215_freelist = NULL;
	spin_lock_init(&raw3215_freelist_lock);
	for (i = 0; i < NR_3215_REQ; i++) {
		req = (struct raw3215_req *)
			alloc_bootmem_low(sizeof(struct raw3215_req));
		req->next = raw3215_freelist;
		raw3215_freelist = req;
	}
	cdev = ccw_device_probe_console();
	if (!cdev)
		return -ENODEV;
	/* First device slot is reserved for the console. */
	raw3215[0] = raw = (struct raw3215_info *)
		alloc_bootmem_low(sizeof(struct raw3215_info));
	memset(raw, 0, sizeof(struct raw3215_info));
	raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
	raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
	raw->cdev = cdev;
	raw->lock = get_ccwdev_lock(cdev);
	cdev->dev.driver_data = raw;
	cdev->handler = raw3215_irq;
	/* The console device is never shut down. */
	raw->flags |= RAW3215_FIXED;
	tasklet_init(&raw->tasklet,
		     (void (*)(unsigned long)) raw3215_tasklet,
		     (unsigned long) raw);
	init_waitqueue_head(&raw->empty_wait);
	/* Request the console irq */
	if (raw3215_startup(raw) != 0) {
		/* Roll back all bootmem allocations on failure. */
		free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
		free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
		raw3215[0] = NULL;
		printk("Couldn't find a 3215 console device\n");
		return -ENODEV;
	}
	register_console(&con3215);
	return 0;
}
/* * Resume device. */ static int dasd_ioctl_resume(struct dasd_block *block) { unsigned long flags; struct dasd_device *base; base = block->base; if (!capable (CAP_SYS_ADMIN)) return -EACCES; pr_info("%s: I/O operations have been resumed " "on the DASD\n", dev_name(&base->cdev->dev)); spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); base->stopped &= ~DASD_STOPPED_QUIESCE; spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); dasd_schedule_block_bh(block); return 0; }
/*
 * Resume device: re-enable I/O after a quiesce and reschedule the
 * bottom half. Requires CAP_SYS_ADMIN.
 * (Original header comment said "Quiesce device" — corrected.)
 */
static int dasd_ioctl_resume(struct dasd_device *device)
{
	unsigned long irqflags;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	DEV_MESSAGE(KERN_DEBUG, device, "%s", "resume IO on device");
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), irqflags);
	device->stopped &= ~DASD_STOPPED_QUIESCE;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), irqflags);
	dasd_schedule_bh(device);
	return 0;
}
/*
 * Function to start a delayed output after RAW3215_TIMEOUT seconds
 */
static void raw3215_timeout(unsigned long __data)
{
	struct raw3215_info *raw = (struct raw3215_info *) __data;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
	raw->flags &= ~RAW3215_TIMER_RUNS;
	/* Do not touch the device while it is frozen for suspend. */
	if (!(raw->port.flags & ASYNC_SUSPENDED)) {
		raw3215_mk_write_req(raw);
		raw3215_start_io(raw);
		/* Re-arm the timer if requests are still queued but no
		 * I/O got started and no other timer is pending. */
		if ((raw->queued_read || raw->queued_write) &&
		    !(raw->flags & RAW3215_WORKING) &&
		    !(raw->flags & RAW3215_TIMER_RUNS)) {
			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
			add_timer(&raw->timer);
			raw->flags |= RAW3215_TIMER_RUNS;
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
 * Wait until length bytes are available in the output buffer.
 * Has to be called with the s390irq lock held. Can be called
 * disabled.
 */
static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
{
	while (RAW3215_BUFFER_SIZE - raw->count < length) {
		/* there might be a request pending */
		raw->flags |= RAW3215_FLUSHING;
		raw3215_mk_write_req(raw);
		raw3215_try_io(raw);
		raw->flags &= ~RAW3215_FLUSHING;
#ifdef CONFIG_TN3215_CONSOLE
		/* Console path: spin until the console device is idle. */
		wait_cons_dev();
#endif
		/* Enough room freed up ? */
		if (RAW3215_BUFFER_SIZE - raw->count >= length)
			break;
		/* there might be another cpu waiting for the lock */
		spin_unlock(get_ccwdev_lock(raw->cdev));
		udelay(100);
		spin_lock(get_ccwdev_lock(raw->cdev));
	}
}
/*
 * The bottom half handler routine for 3215 devices. It tries to start
 * the next IO and wakes up processes waiting on the tty.
 */
static void raw3215_tasklet(void *data)
{
	struct raw3215_info *raw;
	struct tty_struct *tty;
	unsigned long flags;

	raw = (struct raw3215_info *) data;
	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
	raw3215_mk_write_req(raw);
	raw3215_try_io(raw);
	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
	/* Check for pending message from raw3215_irq */
	if (raw->message != NULL) {
		/* NOTE(review): non-literal printk format string.
		 * raw->message appears to be a driver-internal format taking
		 * dstat/cstat — confirm no external data can reach it. */
		printk(raw->message, raw->msg_dstat, raw->msg_cstat);
		raw->message = NULL;
	}
	tty = raw->tty;
	/* Wake writers only when enough buffer space is available again. */
	if (tty != NULL &&
	    RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
		tty_wakeup(tty);
	}
}
static ssize_t tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; ssize_t rc; tdev = dev_get_drvdata(dev); if (tdev->first_minor < 0) return scnprintf(buf, PAGE_SIZE, "N/A\n"); spin_lock_irq(get_ccwdev_lock(tdev->cdev)); if (list_empty(&tdev->req_queue)) rc = scnprintf(buf, PAGE_SIZE, "---\n"); else { struct tape_request *req; req = list_entry(tdev->req_queue.next, struct tape_request, list); rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); } spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); return rc; }
/*
 * String write routine for 3215 devices
 *
 * Copies up to 'length' bytes of 'str' into the output ring buffer,
 * converting to EBCDIC in place, and starts I/O if none is running.
 */
static void raw3215_write(struct raw3215_info *raw, const char *str,
			  unsigned int length)
{
	unsigned long flags;
	int c, count;

	while (length > 0) {
		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
		/* Handle at most one full buffer's worth per iteration. */
		count = (length > RAW3215_BUFFER_SIZE) ?
			RAW3215_BUFFER_SIZE : length;
		length -= count;
		/* May temporarily drop the ccw lock while waiting for
		 * count bytes of free buffer space. */
		raw3215_make_room(raw, count);
		/* copy string to output buffer and convert it to EBCDIC */
		while (1) {
			/* Copy only the contiguous stretch before the
			 * buffer wraps or fills. */
			c = min_t(int, count,
				  min(RAW3215_BUFFER_SIZE - raw->count,
				      RAW3215_BUFFER_SIZE - raw->head));
			if (c <= 0)
				break;
			memcpy(raw->buffer + raw->head, str, c);
			ASCEBC(raw->buffer + raw->head, c);
			raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
			raw->count += c;
			raw->line_pos += c;
			str += c;
			count -= c;
		}
		if (!(raw->flags & RAW3215_WORKING)) {
			raw3215_mk_write_req(raw);
			/* start or queue request */
			raw3215_try_io(raw);
		}
		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
	}
}
static int raw3215_probe (struct ccw_device *cdev) { struct raw3215_info *raw; int line; raw = kmalloc(sizeof(struct raw3215_info) + RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA); if (raw == NULL) return -ENOMEM; spin_lock(&raw3215_device_lock); for (line = 0; line < NR_3215; line++) { if (!raw3215[line]) { raw3215[line] = raw; break; } } spin_unlock(&raw3215_device_lock); if (line == NR_3215) { kfree(raw); return -ENODEV; } raw->cdev = cdev; raw->lock = get_ccwdev_lock(cdev); raw->inbuf = (char *) raw + sizeof(struct raw3215_info); memset(raw, 0, sizeof(struct raw3215_info)); raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL|GFP_DMA); if (raw->buffer == NULL) { spin_lock(&raw3215_device_lock); raw3215[line] = 0; spin_unlock(&raw3215_device_lock); kfree(raw); return -ENOMEM; } tasklet_init(&raw->tasklet, (void (*)(unsigned long)) raw3215_tasklet, (unsigned long) raw); init_waitqueue_head(&raw->empty_wait); cdev->dev.driver_data = raw; cdev->handler = raw3215_irq; return 0; }
/* * Show function for /proc/tapedevices */ static int tape_proc_show(struct seq_file *m, void *v) { struct tape_device *device; struct tape_request *request; const char *str; unsigned long n; n = (unsigned long) v - 1; if (!n) { seq_printf(m, "TapeNo\tBusID CuType/Model\t" "DevType/Model\tBlkSize\tState\tOp\tMedState\n"); } device = tape_get_device(n); if (IS_ERR(device)) return 0; spin_lock_irq(get_ccwdev_lock(device->cdev)); seq_printf(m, "%d\t", (int) n); seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id); seq_printf(m, "%04X/", device->cdev->id.cu_type); seq_printf(m, "%02X\t", device->cdev->id.cu_model); seq_printf(m, "%04X/", device->cdev->id.dev_type); seq_printf(m, "%02X\t\t", device->cdev->id.dev_model); if (device->char_data.block_size == 0) seq_printf(m, "auto\t"); else seq_printf(m, "%i\t", device->char_data.block_size); if (device->tape_state >= 0 && device->tape_state < TS_SIZE) str = tape_state_verbose[device->tape_state]; else str = "UNKNOWN"; seq_printf(m, "%s\t", str); if (!list_empty(&device->req_queue)) { request = list_entry(device->req_queue.next, struct tape_request, list); str = tape_op_verbose[request->op]; } else
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		/* Channel busy: queue on the collect queue instead,
		 * unless that would overflow the buffer. */
		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against beeing free'd by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		/* Data above 2G: copy into a fresh DMA-capable skb. */
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
						       skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;	/* trans_skb channel program */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;	/* direct-skb channel program */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
			      (unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
#ifdef CONFIG_TN3270_CONSOLE if (raw3270_registered == 0) { unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)) wait_cons_dev(); spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); } else #endif rc = wait_event_interruptible(raw3270_wait_queue, test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
/*
 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
 *
 * Fills a dasd_information2_t from the discipline's fill_info hook plus
 * device/ioctl state and copies it to user space; BIODASDINFO copies
 * only the smaller dasd_information_t prefix.
 */
static int dasd_ioctl_information(struct dasd_block *block,
				  unsigned int cmd, void __user *argp)
{
	struct dasd_information2_t *dasd_info;
	unsigned long flags;
	int rc;
	struct dasd_device *base;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;

	base = block->base;
	if (!base->discipline->fill_info)
		return -EINVAL;

	dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
	if (dasd_info == NULL)
		return -ENOMEM;

	/* Let the discipline fill in its own portion first. */
	rc = base->discipline->fill_info(base, dasd_info);
	if (rc) {
		kfree(dasd_info);
		return rc;
	}

	cdev = base->cdev;
	ccw_device_get_id(cdev, &dev_id);

	dasd_info->devno = dev_id.devno;
	dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
	dasd_info->cu_type = cdev->id.cu_type;
	dasd_info->cu_model = cdev->id.cu_model;
	dasd_info->dev_type = cdev->id.dev_type;
	dasd_info->dev_model = cdev->id.dev_model;
	dasd_info->status = base->state;
	/*
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions.
	 * This must be hidden from user-space.
	 */
	dasd_info->open_count = atomic_read(&block->open_count);
	if (!block->bdev)
		dasd_info->open_count++;

	/*
	 * check if device is really formatted
	 * LDL / CDL was returned by 'fill_info'
	 */
	if ((base->state < DASD_STATE_READY) ||
	    (dasd_check_blocksize(block->bp_block)))
		dasd_info->format = DASD_FORMAT_NONE;

	dasd_info->features |=
		((base->features & DASD_FEATURE_READONLY) != 0);

	if (base->discipline)
		memcpy(dasd_info->type, base->discipline->name, 4);
	else
		memcpy(dasd_info->type, "none", 4);

	if (block->request_queue->request_fn) {
		struct list_head *l;
#ifdef DASD_EXTENDED_PROFILING
		{
			/* NOTE(review): this inner 'l' shadows the outer
			 * declaration above — harmless but worth cleaning
			 * up (-Wshadow). */
			struct list_head *l;
			spin_lock_irqsave(&block->lock, flags);
			list_for_each(l, &block->request_queue->queue_head)
				dasd_info->req_queue_len++;
			spin_unlock_irqrestore(&block->lock, flags);
		}
#endif				/* DASD_EXTENDED_PROFILING */
		/* Count requests currently on the ccw queue. */
		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
		list_for_each(l, &base->ccw_queue)
			dasd_info->chanq_len++;
		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
				       flags);
	}

	rc = 0;
	/* BIODASDINFO2 gets the full structure, BIODASDINFO the prefix. */
	if (copy_to_user(argp, dasd_info,
			 ((cmd == (unsigned int) BIODASDINFO2) ?
			  sizeof(struct dasd_information2_t) :
			  sizeof(struct dasd_information_t))))
		rc = -EFAULT;
	kfree(dasd_info);
	return rc;
}
/*
 * Transmit a packet on a CTCM channel (helper for the net xmit path).
 * If the channel FSM is not TXIDLE the skb is parked on the collect
 * queue; otherwise a channel program is started for it directly.
 * Returns 0 on success, -EBUSY/-ENOMEM on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* Test the FSM state under collect_lock so an IRQ cannot flip
	 * it to TXIDLE between the test and queueing. */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		/* Channel busy: queue on collect queue, unless the
		 * collect buffer would overflow. */
		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/* Hold a reference so upper layers cannot free the skb. */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	/* Prepend link-level header and 2-byte block length. */
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
	/* Data above 2G cannot be addressed by the channel program:
	 * copy into a fresh GFP_DMA skb. */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}
	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/* IDAL allocation failed — fall back to copying into
		 * trans_skb, which usually has a pre-allocated IDAL. */
		if (ctcm_checkalloc_buffer(ch)) {
			/* Remove our header; it gets re-added on
			 * retransmit. */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
						       skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;	/* trans_skb channel program */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;	/* direct-skb channel program */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time();
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
			      (unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/* Remove our header; it gets re-added on retransmit. */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}