Example #1
0
/* Thin wrapper that emits a kernel stack trace via dump_stack(). */
void nd_dump_stack(void) {
    dump_stack();
}
Example #2
0
/**
 * validate_vid_hdr - validate a volume identifier header.
 * @ubi: UBI device description object
 * @vid_hdr: the volume identifier header to check
 *
 * This function checks the data stored in the volume identifier header
 * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
 */
static int validate_vid_hdr(const struct ubi_device *ubi,
			    const struct ubi_vid_hdr *vid_hdr)
{
	int vol_type = vid_hdr->vol_type;
	int copy_flag = vid_hdr->copy_flag;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int lnum = be32_to_cpu(vid_hdr->lnum);
	int compat = vid_hdr->compat;
	int data_size = be32_to_cpu(vid_hdr->data_size);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);
	int data_crc = be32_to_cpu(vid_hdr->data_crc);
	int usable_leb_size = ubi->leb_size - data_pad;

	if (copy_flag != 0 && copy_flag != 1) {
		ubi_err("bad copy_flag");
		goto bad;
	}

	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
	    data_pad < 0) {
		ubi_err("negative values");
		goto bad;
	}

	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err("bad vol_id");
		goto bad;
	}

	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
		ubi_err("bad compat");
		goto bad;
	}

	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
	    compat != UBI_COMPAT_REJECT) {
#ifndef CONFIG_BLB
		ubi_err("bad compat");
		goto bad;
#else
		if (vol_id != UBI_BACKUP_VOLUME_ID) {
			ubi_err("bad compat");
			goto bad;
		}
#endif
	}

	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
		ubi_err("bad vol_type");
		goto bad;
	}

	if (data_pad >= ubi->leb_size / 2) {
		ubi_err("bad data_pad");
		goto bad;
	}

	if (vol_type == UBI_VID_STATIC) {
		/*
		 * Although from a high-level point of view static volumes may
		 * contain zero bytes of data, no VID header may contain zeros
		 * in these fields, because empty volumes do not have mapped
		 * logical eraseblocks.
		 */
		if (used_ebs == 0) {
			ubi_err("zero used_ebs");
			goto bad;
		}
		if (data_size == 0) {
			ubi_err("zero data_size");
			goto bad;
		}
		if (lnum < used_ebs - 1) {
			if (data_size != usable_leb_size) {
				ubi_err("bad data_size");
				goto bad;
			}
		} else if (lnum == used_ebs - 1) {
			if (data_size == 0) {
				ubi_err("bad data_size at last LEB");
				goto bad;
			}
		} else {
			ubi_err("too high lnum");
			goto bad;
		}
	} else {
		if (copy_flag == 0) {
			if (data_crc != 0) {
				ubi_err("non-zero data CRC");
				goto bad;
			}
			if (data_size != 0) {
				ubi_err("non-zero data_size");
				goto bad;
			}
		} else {
			if (data_size == 0) {
				ubi_err("zero data_size of copy");
				goto bad;
			}
		}
		if (used_ebs != 0) {
			ubi_err("bad used_ebs");
			goto bad;
		}
	}

	return 0;

bad:
	ubi_err("bad VID header");
	ubi_dump_vid_hdr(vid_hdr);
	dump_stack();
	return 1;
}
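
A hedged caller sketch for the validator above (the helper name and error
handling are assumptions; upstream UBI performs this check right after
reading a VID header from flash):

/* Hypothetical caller: validate a freshly read VID header. */
static int check_peb_vid(const struct ubi_device *ubi,
			 const struct ubi_vid_hdr *vidh)
{
	/* validate_vid_hdr() returns 0 if consistent, 1 if not */
	if (validate_vid_hdr(ubi, vidh))
		return -EINVAL;
	return 0;
}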
Example #3
0
/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not fill a whole max. write unit (@c->max_write_size). Instead, the
 * node will sit in RAM until the write-buffer is synchronized (e.g., by a
 * timer, or because more data is appended to the write-buffer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node cannot be written because there is no more
 * space in this logical eraseblock, %-ENOSPC is returned.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(!c->ro_media && !c->ro_mount);
	ubifs_assert(!c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	written = 0;

	if (wbuf->used) {
		/*
		 * The node is large enough and does not fit entirely within
		 * current available space. We have to fill and flush
		 * write-buffer and switch to the next max. write unit.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size and @wbuf->size is less than
		 * @c->max_write_size. Write @wbuf->size bytes to make sure the
		 * following writes are done in optimal @c->max_write_size
		 * chunks.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may span one or more whole max. write units, so
	 * write that multiple of the max. write unit size directly to the
	 * flash media. We align the node length to an 8-byte boundary because
	 * we flush the write-buffer anyway if the remaining space is less
	 * than 8 bytes.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
				      wbuf->offs, n);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * And now we have what's left and what does not take whole
		 * max. write unit, so write it to the write-buffer and we are
		 * done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(wbuf);

	return 0;

out:
	ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	ubifs_dump_node(c, buf);
	dump_stack();
	ubifs_dump_leb(c, wbuf->lnum);
	return err;
}
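
For reference, a minimal sketch of the 8-byte round-up that @aligned_len
relies on, assuming the usual kernel-style ALIGN() arithmetic (the macro
name here is made up to avoid clashing with the real one):

/* Round x up to the next multiple of 8 (power-of-two round-up). */
#define ALIGN8(x) (((x) + 7) & ~7)
/* ALIGN8(1) == 8, ALIGN8(8) == 8, ALIGN8(13) == 16 */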
Example #4
0
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *b = p;
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */

		if (tipc_bearer_blocked(p)) {
			if (!s || tipc_bearer_blocked(s))
				continue; /* Can't use either bearer */
			b = s;
		}

		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
#ifdef CONFIG_LGP_DATA_TCPIP_MPTCP
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
#else
			tbuf = pskb_copy(buf, GFP_ATOMIC);
#endif
			if (!tbuf)
				break;
			tipc_bearer_send(b, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}

		/* Swap bearers for next packet */
		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
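
A simplified model of the node-map difference the loop above depends on
(illustrative only; the real tipc_nmap_diff() operates on struct
tipc_node_map bitmaps and maintains a population count):

/* remains_new = nodes still unreached after subtracting this bearer's map */
static void nmap_diff_model(u32 remains, u32 bearer_nodes, u32 *remains_new)
{
	*remains_new = remains & ~bearer_nodes;
}

If the difference leaves the count unchanged, the bearer pair reaches no
additional node and is skipped, which is what the count comparison after
tipc_nmap_diff() checks.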
Example #5
0
/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. If all the data were successfully written,
 * zero is returned. If an error occurred, this function returns a negative
 * error code. If %-EIO is returned, the physical eraseblock most probably went
 * bad.
 *
 * Note, in case of an error, it is possible that something was still written
 * to the flash media, but it may be garbage.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err;

	if (offset >= ubi->leb_start) {
		/*
		 * We write to the data area of the physical eraseblock. Make
		 * sure it has valid EC and VID headers.
		 */
		err = self_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err;
		err = self_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err;
	}

	if (ubi_dbg_is_write_failure(ubi)) {
		ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
			len, pnum, offset);
		dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
			err, len, pnum, offset, written);
		dump_stack();
		ubi_dump_flash(ubi, pnum, offset, len);
	} else
		ubi_assert(written == len);

	if (!err) {
		err = self_check_write(ubi, buf, pnum, offset, len);
		if (err)
			return err;

		/*
		 * Since we always write sequentially, the rest of the PEB has
		 * to contain only 0xFF bytes.
		 */
		offset += len;
		len = ubi->peb_size - offset;
		if (len)
			err = ubi_self_check_all_ff(ubi, pnum, offset, len);
	}

	return err;
}
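
A worked sketch of the flash address computation used above; the cast to
loff_t matters because pnum * peb_size can overflow 32-bit int arithmetic
(the values in the comment are illustrative):

/* With 128 KiB PEBs, PEB 5 offset 4096 -> 5 * 131072 + 4096 = 659456. */
static loff_t peb_addr(int pnum, int offset, int peb_size)
{
	return (loff_t)pnum * peb_size + offset;
}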
Example #6
0
/* kprobe pre_handler: called just before the probed instruction is executed */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
    printk(KERN_INFO "pre_handler: p->addr = 0x%p\n", p->addr);
    dump_stack();
    return 0;
}
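
A minimal registration sketch in the style of samples/kprobes (the probed
symbol is an assumption; any kallsyms-visible function would do):

static struct kprobe kp = {
    .symbol_name = "do_fork",   /* hypothetical probe target */
    .pre_handler = handler_pre,
};

static int __init kprobe_init(void)
{
    int ret = register_kprobe(&kp);

    if (ret < 0)
        printk(KERN_INFO "register_kprobe failed, returned %d\n", ret);
    return ret;
}

static void __exit kprobe_exit(void)
{
    unregister_kprobe(&kp);
}

module_init(kprobe_init);
module_exit(kprobe_exit);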
Example #7
0
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
    int bp_index;

    /*
     * Prepare broadcast link message for reliable transmission,
     * if first time trying to send it;
     * preparation is skipped for broadcast link protocol messages
     * since they are sent in an unreliable manner and don't need it
     */
    if (likely(!msg_non_seq(buf_msg(buf)))) {
        struct tipc_msg *msg;

        bcbuf_set_acks(buf, bclink->bcast_nodes.count);
        msg = buf_msg(buf);
        msg_set_non_seq(msg, 1);
        msg_set_mc_netid(msg, tipc_net_id);
        bcl->stats.sent_info++;

        if (WARN_ON(!bclink->bcast_nodes.count)) {
            dump_stack();
            return 0;
        }
    }

    /* Send buffer over bearers until all targets reached */
    bcbearer->remains = bclink->bcast_nodes;

    for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
        struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
        struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

        if (!p)
            break;	/* no more bearers to try */

        tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
        if (bcbearer->remains_new.count == bcbearer->remains.count)
            continue;	/* bearer pair doesn't add anything */

        if (p->blocked ||
                p->media->send_msg(buf, p, &p->media->bcast_addr)) {
            /* unable to send on primary bearer */
            if (!s || s->blocked ||
                    s->media->send_msg(buf, s,
                                       &s->media->bcast_addr)) {
                /* unable to send on either bearer */
                continue;
            }
        }

        if (s) {
            bcbearer->bpairs[bp_index].primary = s;
            bcbearer->bpairs[bp_index].secondary = p;
        }

        if (bcbearer->remains_new.count == 0)
            break;	/* all targets reached */

        bcbearer->remains = bcbearer->remains_new;
    }

    return 0;
}
Example #8
0
/*
 * Sets the DAVINCI MUX register based on the table
 */
int __init_or_module davinci_cfg_reg(const unsigned long index)
{
	static DEFINE_SPINLOCK(mux_spin_lock);
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned long flags;
	const struct mux_config *cfg;
	unsigned int reg_orig = 0, reg = 0;
	unsigned int mask, warn = 0;

	if (WARN_ON(!soc_info->pinmux_pins))
		return -ENODEV;

	if (!pinmux_base) {
		pinmux_base = ioremap(soc_info->pinmux_base, SZ_4K);
		if (WARN_ON(!pinmux_base))
			return -ENOMEM;
	}

	if (index >= soc_info->pinmux_pins_num) {
		printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
		       index, soc_info->pinmux_pins_num);
		dump_stack();
		return -ENODEV;
	}

	cfg = &soc_info->pinmux_pins[index];

	if (cfg->name == NULL) {
		printk(KERN_ERR "No entry for the specified index\n");
		return -ENODEV;
	}

	/* Update the mux register in question */
	if (cfg->mask) {
		unsigned	tmp1, tmp2;

		spin_lock_irqsave(&mux_spin_lock, flags);
		reg_orig = __raw_readl(pinmux_base + cfg->mux_reg);

		mask = (cfg->mask << cfg->mask_offset);
		tmp1 = reg_orig & mask;
		reg = reg_orig & ~mask;

		tmp2 = (cfg->mode << cfg->mask_offset);
		reg |= tmp2;

		if (tmp1 != tmp2)
			warn = 1;

		__raw_writel(reg, pinmux_base + cfg->mux_reg);
		spin_unlock_irqrestore(&mux_spin_lock, flags);
	}

	if (warn) {
#ifdef CONFIG_DAVINCI_MUX_WARNINGS
		printk(KERN_WARNING "MUX: initialized %s\n", cfg->name);
#endif
	}

#ifdef CONFIG_DAVINCI_MUX_DEBUG
	if (cfg->debug || warn) {
		printk(KERN_WARNING "MUX: Setting register %s\n", cfg->name);
		printk(KERN_WARNING "	   %s (0x%08x) = 0x%08x -> 0x%08x\n",
		       cfg->mux_reg_name, cfg->mux_reg, reg_orig, reg);
	}
#endif

	return 0;
}
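
A hedged usage sketch from board init code; the pin index below is a
hypothetical SoC-specific enum value, not taken from this file:

static int __init board_setup_pins(void)
{
	int ret = davinci_cfg_reg(DA850_UART2_TXD);	/* hypothetical index */

	if (ret)
		pr_warn("UART2 TXD pinmux failed: %d\n", ret);
	return ret;
}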
Example #9
0
static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
			       unsigned int plen, void *payload,
			       unsigned int outlen, void *out)
{
	struct ar9170_usb *aru = (void *) ar;
	struct urb *urb = NULL;
	unsigned long flags;
	int err = -ENOMEM;

	if (unlikely(!IS_ACCEPTING_CMD(ar)))
		return -EPERM;

	if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
		return -EINVAL;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (unlikely(!urb))
		goto err_free;

	ar->cmdbuf[0] = cpu_to_le32(plen);
	ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
	/* writing multiple regs fills this buffer already */
	if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
		memcpy(&ar->cmdbuf[1], payload, plen);

	spin_lock_irqsave(&aru->common.cmdlock, flags);
	aru->readbuf = (u8 *)out;
	aru->readlen = outlen;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

	usb_fill_int_urb(urb, aru->udev,
			 usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
			 aru->common.cmdbuf, plen + 4,
			 ar9170_usb_tx_urb_complete, NULL, 1);

	usb_anchor_urb(urb, &aru->tx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		goto err_unbuf;
	}
	usb_free_urb(urb);

	err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
	if (err == 0) {
		err = -ETIMEDOUT;
		goto err_unbuf;
	}

	if (aru->readlen != outlen) {
		err = -EMSGSIZE;
		goto err_unbuf;
	}

	return 0;

err_unbuf:
	/* Maybe the device was removed while we were waiting? */
	if (IS_STARTED(ar)) {
		dev_err(&aru->udev->dev, "no command feedback "
					 "received (%d).\n", err);

		/* provide some maybe useful debug information */
		print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
				     aru->common.cmdbuf, plen + 4);
		dump_stack();
	}

	/* invalidate to avoid completing the next prematurely */
	spin_lock_irqsave(&aru->common.cmdlock, flags);
	aru->readbuf = NULL;
	aru->readlen = 0;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

err_free:

	return err;
}
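
One convention the wait above relies on: wait_for_completion_timeout()
returns 0 if the timeout elapsed and otherwise the remaining jiffies, which
is why 0 is mapped to -ETIMEDOUT. A minimal sketch:

/* Returns 0 on completion, -ETIMEDOUT if nothing arrived within 1s. */
static int wait_cmd_response(struct completion *done)
{
	unsigned long left = wait_for_completion_timeout(done, HZ);

	return left ? 0 : -ETIMEDOUT;
}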
Example #10
0
int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows block_prepare_write() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
	mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
	     (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	mlog_exit(err);
	return err;
}
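
A small illustration of the byte/block shifts used above; the 4 KiB block
size (i_blkbits == 12) in the comments is an assumption for the example
values:

static inline u64 bytes_to_blocks(u64 bytes, unsigned int blkbits)
{
	return bytes >> blkbits;	/* 16384 >> 12 == 4 */
}

static inline u64 blocks_to_bytes(u64 blocks, unsigned int blkbits)
{
	return blocks << blkbits;	/* 4 << 12 == 16384 */
}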
Example #11
0
static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
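
A hedged caller sketch for a synchronous command that wants the response
packet back (CMD_WANT_SKB, as handled in the cancel path above); the
command ID is a placeholder:

static int example_sync_cmd(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = ECHO_CMD,		/* placeholder command ID */
		.flags = CMD_WANT_SKB,	/* ask for resp_pkt back */
	};
	int ret = iwl_trans_send_cmd(trans, &cmd);

	if (!ret)
		iwl_free_resp(&cmd);	/* caller owns the response */
	return ret;
}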
Example #12
0
int sprd6500_init_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata)
{
	int ret = 0;
	struct platform_device *pdev;

	pr_err("[MODEM_IF:SC6500] <%s> start\n", __func__);
	
	pdev = to_platform_device(mc->dev);

#if 0 //def CONFIG_OF
	sprd6500_modem_cfg_gpio(pdev);
#endif

	mc->gpio_cp_on = pdata->gpio_cp_on;
	mc->gpio_pda_active = pdata->gpio_pda_active;
	mc->gpio_phone_active = pdata->gpio_phone_active;
	mc->gpio_cp_dump_int = pdata->gpio_cp_dump_int;
	mc->gpio_sim_detect = pdata->gpio_sim_detect;

	mc->gpio_ap_cp_int1 = pdata->gpio_ap_cp_int1;
	mc->gpio_ap_cp_int2 = pdata->gpio_ap_cp_int2;
	mc->gpio_uart_sel = pdata->gpio_uart_sel;
#ifdef CONFIG_SEC_DUAL_MODEM_MODE
	mc->gpio_sim_sel = pdata->gpio_sim_sel;
#endif
	
#if defined(CONFIG_LINK_DEVICE_PLD)
	mc->gpio_fpga1_cs_n = pdata->gpio_fpga1_cs_n;
#endif
	gpio_set_value(mc->gpio_cp_on, 0);

	mc->irq_phone_active = platform_get_irq_byname(pdev, "cp_active_irq");
	pr_info("[MODEM_IF:SC6500] <%s> PHONE_ACTIVE IRQ# = %d\n",
		__func__, mc->irq_phone_active);

	sprd6500_get_ops(mc);

	if (mc->irq_phone_active) {
		ret = request_irq(mc->irq_phone_active,
				  phone_active_irq_handler,
				  IRQF_TRIGGER_HIGH,
				  "esc_active",
				  mc);
		if (ret) {
			pr_err("[MODEM_IF:SC6500] <%s> failed to request_irq IRQ# %d (err=%d)\n",
				__func__, mc->irq_phone_active, ret);
			dump_stack();
			return ret;
		}

#if 1 /* for now, don't enable the wake option */
		enable_irq(mc->irq_phone_active);
#else
		ret = enable_irq_wake(mc->irq_phone_active);
		if (ret) {
			pr_err("[MODEM_IF:SC6500] %s: failed to enable_irq_wake IRQ# %d (err=%d)\n",
				__func__, mc->irq_phone_active, ret);
			free_irq(mc->irq_phone_active, mc);
			return ret;
		}
#endif
	}

#if defined(CONFIG_SIM_DETECT)
	mc->irq_sim_detect = platform_get_irq_byname(pdev, "sim_irq");
	pr_info("[MODEM_IF:SC6500] <%s> SIM_DECTCT IRQ# = %d\n",
		__func__, mc->irq_sim_detect);

	if (mc->irq_sim_detect) {
		ret = request_irq(mc->irq_sim_detect, sim_detect_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			"esc_sim_detect", mc);
		if (ret) {
			mif_err("failed to request_irq: %d\n", ret);
				mc->sim_state.online = false;
				mc->sim_state.changed = false;
			return ret;
		}

		ret = enable_irq_wake(mc->irq_sim_detect);
		if (ret) {
			mif_err("failed to enable_irq_wake: %d\n", ret);
			free_irq(mc->irq_sim_detect, mc);
			mc->sim_state.online = false;
			mc->sim_state.changed = false;
			return ret;
		}

		/* initialize sim_state => insert: gpio=0, remove: gpio=1 */
		mc->sim_state.online = !gpio_get_value(mc->gpio_sim_detect);
	}
#endif

	return ret;
}
Example #13
0
File: vm.c Project: catseye/Bhuna
int
vm_run(struct vm *vm, int xmax)
{
	vm_label_t label;
	struct value l, r, v;
	struct activation *ar;
	struct builtin *ext_bi;
	int varity;
	int xcount = 0;
	struct value zero, one, two;
	int i;
	/* int upcount, index; */

#ifdef DEBUG
	if (trace_vm) {
		printf("___ virtual machine started ___\n");
	}
#endif

	zero = value_new_integer(0);
	value_deregister(zero);
	one = value_new_integer(1);
	value_deregister(one);
	two = value_new_integer(2);
	value_deregister(two);

	while (*vm->pc != INSTR_HALT) {
#ifdef DEBUG
		if (trace_vm) {
			printf("#%d:\n", vm->pc - vm->program);
			dump_stack(vm);
		}
#endif
		if (((++xcount) & 0xff) == 0) {
			if (a_count + v_count > gc_target) {
#ifdef DEBUG
				if (trace_gc > 0) {
					printf("[ARC] GARBAGE COLLECTION STARTED on %d activation records + %d values\n",
						a_count, v_count);
					/*activation_dump(current_ar, 0);
					printf("\n");*/
					dump_activation_stack(vm);
				}
#endif
				gc();
#ifdef DEBUG
				if (trace_gc > 0) {
					printf("[ARC] GARBAGE COLLECTION FINISHED, now %d activation records + %d values\n",
						a_count, v_count);
					/*activation_dump(current_ar, 0);
					printf("\n");*/
				}
#endif
				/*
				 * Slide the target to account for the fact that there
				 * are now 'a_count' activation records in existence.
				 * Only GC when there are gc_trigger *more* ar's.
				 */
				gc_target = a_count + v_count + gc_trigger;
			}
			
			/*
			 * Also, give up control if we've exceeded our timeslice.
			 */
			if (xcount >= xmax)
				return(VM_TIME_EXPIRED);
		}

		switch (*vm->pc) {

#ifdef INLINE_BUILTINS
		case INDEX_BUILTIN_NOT:
			POP_VALUE(l);
			if (l.type == VALUE_BOOLEAN) {
				v = value_new_boolean(!l.v.b);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_AND:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_BOOLEAN && r.type == VALUE_BOOLEAN) {
				v = value_new_boolean(l.v.b && r.v.b);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_OR:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_BOOLEAN && r.type == VALUE_BOOLEAN) {
				v = value_new_boolean(l.v.b || r.v.b);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;

		case INDEX_BUILTIN_EQU:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_boolean(l.v.i == r.v.i);
			} else if (l.type == VALUE_OPAQUE && r.type == VALUE_OPAQUE) {
				v = value_new_boolean(l.v.ptr == r.v.ptr);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_NEQ:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_boolean(l.v.i != r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_GT:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_boolean(l.v.i > r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_LT:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_boolean(l.v.i < r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_GTE:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_boolean(l.v.i >= r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_LTE:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_boolean(l.v.i <= r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;

		case INDEX_BUILTIN_ADD:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_integer(l.v.i + r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_MUL:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_integer(l.v.i * r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_SUB:
			POP_VALUE(r);
			POP_VALUE(l);
			/* subs++; */
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				v = value_new_integer(l.v.i - r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_DIV:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				if (r.v.i == 0)
					v = value_new_error("division by zero");
				else
					v = value_new_integer(l.v.i / r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;
		case INDEX_BUILTIN_MOD:
			POP_VALUE(r);
			POP_VALUE(l);
			if (l.type == VALUE_INTEGER && r.type == VALUE_INTEGER) {
				if (r.v.i == 0)
					v = value_new_error("modulo by zero");
				else
					v = value_new_integer(l.v.i % r.v.i);
			} else {
				v = value_new_error("type mismatch");
			}
			PUSH_VALUE(v);
			break;

#endif /* INLINE_BUILTINS */

		/*
		 * This sort of needs to be here even when INLINE_BUILTINS
		 * isn't used (in practice INLINE_BUILTINS will always be
		 * used anyway...)
		 */
		case INDEX_BUILTIN_RECV:
			POP_VALUE(l);
			r = value_null();

			if (l.type == VALUE_INTEGER) {
				if (!process_recv(&r)) {
					PUSH_VALUE(l);
					return(VM_WAITING);
				}
			} else {
				r = value_new_error("type mismatch");
			}
			PUSH_VALUE(r);
			break;

		case INSTR_PUSH_VALUE:
			l = *(struct value *)(vm->pc + 1);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_PUSH_VALUE:\n");
				value_print(l);
				printf("\n");
			}
#endif
			PUSH_VALUE(l);
			vm->pc += sizeof(struct value);
			break;

		case INSTR_PUSH_ZERO:
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_PUSH_ZERO\n");
			}
#endif
			PUSH_VALUE(zero);
			break;
		case INSTR_PUSH_ONE:
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_PUSH_ONE\n");
			}
#endif
			PUSH_VALUE(one);
			break;
		case INSTR_PUSH_TWO:
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_PUSH_TWO\n");
			}
#endif
			PUSH_VALUE(two);
			break;

		case INSTR_PUSH_LOCAL:
			l = activation_get_value(vm->current_ar,
			    *(vm->pc + 1), *(vm->pc + 2));

#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_PUSH_LOCAL:\n");
				value_print(l);
				printf("\n");
			}
#endif
			PUSH_VALUE(l);
			vm->pc += sizeof(unsigned char) * 2;
			break;

		case INSTR_POP_LOCAL:
			POP_VALUE(l);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_POP_LOCAL:\n");
				value_print(l);
				printf("\n");
			}
#endif
			activation_set_value(vm->current_ar,
			    *(vm->pc + 1), *(vm->pc + 2), l);
			vm->pc += sizeof(unsigned char) * 2;
			break;

		case INSTR_INIT_LOCAL:
			POP_VALUE(l);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_INIT_LOCAL:\n");
				value_print(l);
				printf("\n");
			}
#endif
			activation_initialize_value(vm->current_ar,
			    *(vm->pc + 1), l);
			vm->pc += sizeof(unsigned char) * 2;
			break;

		case INSTR_JMP:
			label = *(vm_label_t *)(vm->pc + 1);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_JMP -> #%d:\n", label - vm->program);
			}
#endif
			vm->pc = label - 1;
			break;

		case INSTR_JZ:
			POP_VALUE(l);
			label = *(vm_label_t *)(vm->pc + 1);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_JZ -> ");
				value_print(l);
				printf(", #%d:\n", label - vm->program);
			}
#endif
			if (!l.v.b) {
				vm->pc = label - 1;
			} else {
				vm->pc += sizeof(vm_label_t);
			}
			break;

		case INSTR_CALL:
			POP_VALUE(l);
			label = l.v.s->v.k->label;
			if (l.v.s->v.k->cc > 0) {
				/*
				 * Create a new activation record
				 * on the heap for this call.
				 */
				ar = activation_new_on_heap(
				    l.v.s->v.k->arity +
				    l.v.s->v.k->locals,
				    vm->current_ar, l.v.s->v.k->ar);
			} else {
				/*
				 * Optimize by placing it on a stack.
				 */
				ar = activation_new_on_stack(
				    l.v.s->v.k->arity +
				    l.v.s->v.k->locals,
				    vm->current_ar, l.v.s->v.k->ar, vm);
			}
			/*
			 * Fill out the activation record.
			 */
			for (i = l.v.s->v.k->arity - 1; i >= 0; i--) {
				POP_VALUE(r);
				activation_initialize_value(ar, i, r);
			}

			vm->current_ar = ar;
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_CALL -> #%d:\n", label - vm->program);
			}
#endif
			/*
			printf("%% process %d pushing pc = %d\n",
			    current_process->number, vm->pc - vm->program);
			*/
			PUSH_PC(vm->pc + 1); /* + sizeof(vm_label_t)); */
			vm->pc = label - 1;
			break;

		case INSTR_GOTO:
			POP_VALUE(l);
			label = l.v.s->v.k->label;

			/*
			 * DON'T create a new activation record for this leap
			 * UNLESS the current activation record isn't large enough.
			 */
			/*
			printf("GOTOing a closure w/arity %d locals %d\n",
				l.v.s->v.k->arity, l.v.s->v.k->locals);
			printf("current ar size %d\n", current_ar->size);
			*/

			if (vm->current_ar->size < l.v.s->v.k->arity + l.v.s->v.k->locals) {
				/*
				 * REMOVE the current activation record, if on the stack.
				 */
				if (vm->current_ar->admin & AR_ADMIN_ON_STACK) {
					ar = vm->current_ar->caller;
					activation_free_from_stack(vm->current_ar, vm);
					vm->current_ar = ar;
				} else {
					vm->current_ar = vm->current_ar->caller;
				}

				/*
				 * Create a NEW activation record... wherever.
				 */
				if (l.v.s->v.k->cc > 0) {
					/*
					 * Create a new activation record
					 * on the heap for this call.
					 */
					vm->current_ar = activation_new_on_heap(
					    l.v.s->v.k->arity +
					    l.v.s->v.k->locals,
					    vm->current_ar, l.v.s->v.k->ar);
				} else {
					/*
					 * Optimize by placing it on a stack.
					 */
					vm->current_ar = activation_new_on_stack(
					    l.v.s->v.k->arity +
					    l.v.s->v.k->locals,
					    vm->current_ar, l.v.s->v.k->ar, vm);
				}
			}

			/*
			printf("NOW GOTOing a closure w/arity %d locals %d\n",
				l.v.s->v.k->arity, l.v.s->v.k->locals);
			printf("NOW current ar size %d\n", current_ar->size);
			*/

			/*
			 * Fill out the current activation record.
			 */
			for (i = l.v.s->v.k->arity - 1; i >= 0; i--) {
				POP_VALUE(r);
				activation_set_value(vm->current_ar, i, 0, r);
			}

#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_GOTO -> #%d:\n", label - vm->program);
			}
#endif
			/*PUSH_PC(pc + 1);*/ /* + sizeof(vm_label_t)); */
			vm->pc = label - 1;
			break;

		case INSTR_RET:
			vm->pc = POP_PC() - 1;
			/*
			printf("%% process %d popped pc = %d\n",
			    current_process->number, vm->pc - vm->program);
			*/
			if (vm->current_ar->admin & AR_ADMIN_ON_STACK) {
				ar = vm->current_ar->caller;
				activation_free_from_stack(vm->current_ar, vm);
				vm->current_ar = ar;
			} else {
				vm->current_ar = vm->current_ar->caller;
			}
			if (vm->current_ar == NULL)
				return(VM_RETURNED);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_RET -> #%d:\n", vm->pc - vm->program);
			}
#endif
			break;

		case INSTR_SET_ACTIVATION:
			POP_VALUE(l);
			l.v.s->v.k->ar = vm->current_ar;
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_SET_ACTIVATION #%d\n",
				    l.v.s->v.k->label - vm->program);
			}
#endif
			PUSH_VALUE(l);
			break;

		case INSTR_COW_LOCAL:
			l = activation_get_value(vm->current_ar, *(vm->pc + 1), *(vm->pc + 2));

			if (l.v.s->refcount > 1) {
				/*
				printf("deep-copying ");
				value_print(l);
				printf("...\n");
				*/
				r = value_dup(l);
				activation_set_value(vm->current_ar, *(vm->pc + 1), *(vm->pc + 2), r);
			}

#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_COW_LOCAL:\n");
				value_print(l);
				printf("\n");
			}
#endif

			vm->pc += sizeof(unsigned char) * 2;
			break;

		case INSTR_EXTERNAL:
			ext_bi = *(struct builtin **)(vm->pc + 1);
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_EXTERNAL(");
				fputsu8(stdout, ext_bi->name);
				printf("):\n");
			}
#endif
			varity = ext_bi->arity;
			if (varity == -1) {
				POP_VALUE(l);
				varity = l.v.i;
			}
			ar = activation_new_on_stack(varity, vm->current_ar, NULL, vm);
			for (i = varity - 1; i >= 0; i--) {
				POP_VALUE(l);
				activation_initialize_value(ar, i, l);
			}

			v = ext_bi->fn(ar);
			activation_free_from_stack(ar, vm);
#ifdef DEBUG
			if (trace_vm) {
				printf("result was:\n");
				value_print(v);
				printf("\n");
			}
#endif
			if (ext_bi->retval == 1)
				PUSH_VALUE(v);

			vm->pc += sizeof(struct builtin *);
			break;
		default:
			/*
			 * We assume it was a non-inline builtin.
			 */
#ifdef DEBUG
			if (trace_vm) {
				printf("INSTR_BUILTIN(#%d=", *vm->pc);
				fputsu8(stdout, builtins[*vm->pc].name);
				printf("):\n");
			}
#endif
			varity = builtins[*vm->pc].arity;
			if (varity == -1) {
				POP_VALUE(l);
				varity = l.v.i;
			}
			ar = activation_new_on_stack(varity, vm->current_ar, NULL, vm);
			for (i = varity - 1; i >= 0; i--) {
				POP_VALUE(l);
				activation_initialize_value(ar, i, l);
			}

			v = builtins[*vm->pc].fn(ar);
			activation_free_from_stack(ar, vm);
#ifdef DEBUG
			if (trace_vm) {
				printf("result was:\n");
				value_print(v);
				printf("\n");
			}
#endif
			if (builtins[*vm->pc].retval == 1)
				PUSH_VALUE(v);
		}
		vm->pc++;
	}

#ifdef DEBUG
	if (trace_vm) {
		printf("___ virtual machine finished ___\n");
	}
	/*printf("subs = %d\n", subs);*/
#endif
	return(VM_TERMINATED);
}
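
The sliding GC target described in the comment inside vm_run, restated as a
tiny standalone sketch (names mirror the globals used above; values are
illustrative):

/* Collect when the live record/value count exceeds the target... */
static int gc_due(int a_count, int v_count, int gc_target)
{
	return a_count + v_count > gc_target;
}
/* ...then slide the target so the next GC fires only after gc_trigger
 * *more* activation records and values exist:
 *     gc_target = a_count + v_count + gc_trigger;
 */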
Example #14
0
/*----------------------------------------------------------------------*/
void
send_handler(struct event_control *lecb)
{
  extern fd_set *rdfds, *wrfds;
  const struct event *evt = 0;
  int rc = 0;
  int event_processed = 1;
  int save_errno = 0;
  int sd;
  struct info *ip;
  int called_explicitly = 0;
  int events;
  int evt_count;
  int fake_events = 0;
  int trace_fd = 0;
  static int init_done = 0;
  int i;
  int buf_index = -1;
  int listen_sd = -1;

  if (!init_done) {
    for (i = 0; i < EVT_BUFS_MAX; i++) {
      saved_tail[i] = -1;
    }
    init_done = 1;
  }

  if (options.send_loop) {
    should_process_sds = 1;
  } else {
    should_process_sds = 0;
  }


  consecutive = 0;
  send_handler_evts_done = 0;

  in_handler = 1;
  /* turn off notification while processing the event */
  ecb->notify = SIG_GRP;

  if (lecb == NULL) {
    called_explicitly = 1;
    PRINT_TIME(NOFD, &tnow, &tprev,
      "---> send_handler: entered handler explicitly");
    lecb = ecb;
    DEBG(MSG_SEND, "send_handler: lecb was NULL\n");
    DEBG(MSG_SEND, "send_handler: lecb now = %p\n", lecb);
    DEBG(MSG_SEND, "send_handler: lecb->regs = %p\n", lecb->regs);
    for (i = 0; i < EVT_BUFS; i++) {
      DEBG(MSG_SEND,
        "send_handler: lecb->head[%d] = %d lecb->tail[%d] = %d\n",
        i, lecb->head[i], i, lecb->tail[i]);
    }
    t = bogus_regs;
    num_send_handler_calls++;
  } else {
    PRINT_TIME(NOFD, &tnow, &tprev,
      "---> send_handler: entered thru interrupt");
    for (i = 0; i < EVT_BUFS; i++) {
      DEBG(MSG_SEND,
        "send_handler: lecb->head[%d] = %d lecb->tail[%d] = %d\n",
        i, lecb->head[i], i, lecb->tail[i]);
    }
    DEBG(MSG_SEND, "send_handler: lecb->regs = %p\n", lecb->regs);
    num_send_handler_interrupts++;

    t = lecb->regs;
  }

  /* Changed to permit calling handler explicitly */
  DEBG(MSG_SEND, "send_handler: setting lecb->regs = 0xffffffff\n");
  lecb->regs = (void *) 0xffffffff;
  save_errno = errno;

  if (t != bogus_regs && t != 0) {
    if (t->reason > 0 && t->eax < 0) {
      DEBG(MSG_SEND, "send_handler: syscall failure: reason = %i\n",
        t->reason);
      dump_regs(t);
      dump_stack(t);
    }
    send_errno = (int) t->eax;
  } else {
    send_errno = 0;
  }

  DEBG(MSG_SEND, "send_handler: send_errno = %d\n", send_errno);

  /* either entering a critical section or in one so we just leave handler */
  if ((entering_cs || in_cs) && (!called_explicitly)) {
    PRINT_TIME(NOFD, &tnow, &tprev, "<--- send_handler: race detected\n");
    num_sigio_races++;
    send_intrs_to_handle++;
    goto get_out;
  }

  if ((!new_connections_on || sigio_blocked) && (!called_explicitly)) {
    PRINT_TIME(NOFD, &tnow, &tprev,
      "<--- send_handler: race detected - new connections not on\n");
    num_sigio_false++;
    send_intrs_to_handle++;
    goto get_out;
  }


  if (num_idle <= options.free_fd_thold) {
    DEBG(MSG_SEND, "WARNING! send_handler: entered num_idle = %d and "
      "thold = %d\n", num_idle, options.free_fd_thold);
  }

  for (i = 0; i < EVT_BUFS_MAX; i++) {
    saved_tail[i] = lecb->tail[i];
  }
  consecutive = 0;

  while (!(bufs_empty())) {

    /* decide which of the event buffers to process */
    buf_index = which_buf_to_process(buf_index);

    while ((!buf_is_empty(buf_index)) && (event_processed)) {
      PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: at top of loop");
      event_processed = 0;
      conns_off_if_needed();
      evt_count = num_evts_array();
      PRINT_TIME(NOFD, &tnow, &tprev,
        "send_handler: evt_count = %d", evt_count);

      for (i = 0; i < EVT_BUFS; i++) {
        PRINT_TIME(NOFD, &tnow, &tprev,
          "send_handler: head[%d] = %d tail[%d] = %d",
          i, lecb->head[i], i, lecb->tail[i]);
      }

      PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: missed = %d",
        ecb->missed_events);

#ifdef ONE_LISTENER
#ifdef TBB_KINFO
      if (kinfo) {
        PRINT_TIME(NOFD, &tnow, &tprev, "send_handler:  "
          "kinfo->qlen_young = %d kinfo->qlen = %d",
          kinfo->qlen_young, kinfo->qlen);
        PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: syscall "
          "qlen_young = %d        qlen = %d",
          qlen_young(server_sd), qlen(server_sd));
        PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: qlen_listenq = %d",
          qlen_listenq(server_sd));
      }
#endif /* TBB_KINFO */
#endif /* ONE_LISTENER */

      evt = get_next_event(lecb, &buf_index);

#ifdef DEBUG_ON
      verify_evt_array(evt, buf_index);
#endif

      if ((MSG_MASK & MSG_TIME) || (MSG_MASK & MSG_SEND)) {
        print_event(evt, t);
      }
#ifdef ARRAY_OF_BUFS
      num_events[buf_index]++;
#else
      num_events++;
#endif /* ARRAY_OF_BUFS */

      switch (evt->type) {

        case EVT_SIG:
          /* XXX: for now - turn off notification and delivery */
          /* because we are just going to print out some stats and exit */
          ecb->notify = 0;
          ecb->queue = 0;
          num_evt_sig++;
          event_processed = 1;
          PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: Processing EVT_SIG");
          DEBG(MSG_SEND, "send_handler: Processing EVT_SIG\n");
          DEBG(MSG_SEND, "send_handler: Received event: type = %d "
            "id = %d\n", evt->type, evt->event_id);
          if (t != bogus_regs) {
            PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: eip = %p", t->eip);
            DEBG(MSG_SEND, "send_handler: Received event: eip = %p\n",
              t->eip);
          }

          if (sigs[evt->event_id] && sigs[evt->event_id] !=
            (sighandlerfn_t) (-1)) {
            PRINT_TIME(NOFD, &tnow, &tprev,
              "send_handler: signal = %d calling handler", evt->event_id);
            sigs[evt->event_id] (0, evt);
          } else if (sigs[evt->event_id] != (sighandlerfn_t) (-1)) {
            DEBG(MSG_SEND, "send_handler: No handler for %i\n",
              evt->event_id);
            printf("send_handler: No handler for %i\n", evt->event_id);
            fflush(stdout);
            event_processed = 1;
            if (t) {
              dump_regs(t);
            }
            print_event(evt, t);
            exit(1);
          } else {
            printf("send_handler: evt->event_id = %d "
              "sigs[evt->event_id] = %p\n",
              evt->event_id, sigs[evt->event_id]);
            fflush(stdout);
            /* event_processed = 1; */
            if (t) {
              dump_regs(t);
            }
            print_event(evt, t);
            exit(1);
          }
          break;

        case EVT_MSG:
          num_evt_msg++;
          DEBG(MSG_SEND, "send_handler: Processing message from: %i\n",
            evt->event_id);

          /*
           * A previous read call failed (EAGAIN) even though the event
           * was generated saying that the socket was ready.
           * So for now this is a workaround: we've been sent a message
           * saying that we need to re-read from the socket indicated in
           * the msg.
           */

          if ((int) evt->data.msg.data > 0) {
            sd = (int) evt->data.msg.data;
            ip = info_ptr(sd);
            assert(ip);
            fake_events = POLLIN | POLLRDNORM | POLLOUT |
              POLLWRNORM | POLLREADRETRY;
            rc = send_do_io(sd, ip, fake_events);
          }
          event_processed = 1;
          break;

        case EVT_IPACCEPT:
          num_evt_ipaccept++;
          /* TODO: not sure this is in the right place? */
          PRINT_TIME(NOFD, &tnow, &tprev,
            "send_handler: consecutive = %d num_idle = %d",
            consecutive, num_idle);
          sd = evt->data.ipa.fd;
          num_accept_calls++;
          PRINT_TIME(NOFD, &tnow, &tprev,
            "send_handler: Processing EVT_IPACCEPT sd = %d", sd);
          if (sd < 0) {
            /* In this case the sd contains the negated errno */
            process_accept_errs(sd, -sd);
            event_processed = 1;
          } else {
            events = evt->data.io.events;
            if (events & POLLFIN) {
              /*
               * We are going to bypass doing a bunch of work
               * on a socket that has already been closed by
               * the client. So first we have to do a bit of 
               * setup that subsequent code depends on (preconditions)
               */
              ip = info_add(sd);
              ip->sd = sd;
              set_fsm_state(ip, FSM_CONNECTING);
              num_connections++;
              num_accept_successful++;
              num_idle--;
              if (num_idle == options.free_fd_thold) {
                PRINT_TIME(sd, &tnow, &tprev,
                  "send_handler: ATTENTION: NUM_IDLE == %d",
                  options.free_fd_thold);
              }

              PRINT_TIME(sd, &tnow, &tprev, "send_handler: POLL_FIN "
                "calling do_close");
              num_close_send_early_fin++;
              rc = do_close(ip, REASON_SEND_POLL_FIN);
              event_processed = 1;
            } else {
              assert(sd <= max_fds);
              assert(sd > min_usable_sd);
              ip = info_add(sd);
              ip->sd = sd;
              set_fsm_state(ip, FSM_CONNECTING);
              /* Eventually we should add this to SEND */
              /* listen_sd = evt->data.ipa.parent_fd; */
              listen_sd = SOCK_LISTENER_USE_UNKNOWN;
              event_processed = send_handle_new_conn(evt, listen_sd, sd,
                should_process_sds);

#ifdef NOT_SURE
              if ((options.do_multiaccept == 0 && consecutive == 1) ||
                ((options.multiaccept_max !=
                    OPT_MULTIACCEPT_MAX_UNLIMITED) &&
                  (consecutive >= options.multiaccept_max))) {
                PRINT_TIME(NOFD, &tnow, &tprev,
                  "send_handler: reached max accepts");
                socket_new_conn_off();
              }
#endif

              /* if the socket has data to be read, go ahead and
               * read it
               */
              if (!options.accepts_only && should_process_sds) {
                if (event_processed && evt->data.io.events & POLLIN) {
                  /* TODO: this may be wrong as the event has been 
                   * processed
                   * event_processed = 
                   * send_do_io(sd, ip, evt->data.io.events);
                   */
                  rc = send_do_io(sd, ip, evt->data.io.events);
                }
              }
            } /* else */
          } /* else */
          break;

        case EVT_IOREADY:
          num_evt_ioready++;
          /* TODO: not sure this is in the right place? */
          PRINT_TIME(NOFD, &tnow, &tprev,
            "send_handler: consecutive = %d num_idle = %d",
            consecutive, num_idle);
          sd = evt->data.io.fd;
          events = evt->data.io.events;
          PRINT_TIME(sd, &tnow, &tprev,
            "send_handler: Processing EVT_IOREADY");

          if (sd < 0) {
            PRINT_TIME(sd, &tnow, &tprev,
              "send_handler: Processing EVT_IOREADY");
            printf("send_handler: got negative sd on EVT_IOREADY\n");
            print_event(evt, t);
            event_processed = 1;
          } else if (sock_is_listener(sd)) {
            if (events & POLLIN) {
              listen_sd = sd;
              ip = info_add(sd);
              event_processed = send_handle_new_conn(evt, listen_sd, sd,
                should_process_sds);
              /* TODO: This is just to test if it changes anything */
              /* send_async_setup(sd); */

            } else {
              PRINT_TIME(sd, &tnow, &tprev,
                "send_handler: listener_sd EVT_IOREADY but POLLIN "
                "is not set events = 0x%x", events);
              /* is pollhint set and nothing else */
              if ((events | POLLHINT) == POLLHINT) {
                PRINT_TIME(sd, &tnow, &tprev,
                  "send_handler: listener_sd no POLLIN but POLLHINT");
                num_pollhint_server_consumed++;
              }
              event_processed = 1;
            }
          } else {
            if (!options.accepts_only && should_process_sds) {
              ip = info_ptr(sd);
              assert(ip);
              event_processed = send_do_io(sd, ip, events);
              if (!event_processed) {
                printf("send_handler: event not processed\n");
                print_event(evt, t);
                exit(1);
              }
            } else {
              PRINT_TIME(sd, &tnow, &tprev,
                "send_handler: skipping events 0x%x", events);
              PRINT_TIME(sd, &tnow, &tprev,
                "send_handler: accepts_only = %d "
                "should_process_sds = %d",
                options.accepts_only, should_process_sds);

              event_processed = 1;
            }
          }

          if (!event_processed) {
            printf("send_handler: event not processed\n");
            print_event(evt, t);
            exit(1);
          }
          break;

        case EVT_DISPATCH:
          num_evt_dispatch++;
          DEBG(MSG_SEND, "send_handler: Processing EVT_DISPATCH\n");

        case EVT_SYNCH:
          num_evt_synch++;
          DEBG(MSG_SEND, "send_handler: Processing EVT_SYNCH\n");

        default:
          num_evt_unknown++;
          PRINT_TIME(NOFD, &tnow, &tprev, "send_handler: doing event");
          DEBG(MSG_SEND, "send_handler: lecb->head[%d] = %d "
            "lecb->tail[%d] = %d lecb->event_list_size[%d] = %d\n",
            buf_index, lecb->head[buf_index],
            buf_index, lecb->tail[buf_index],
            buf_index, lecb->event_list_size[buf_index]);
          DEBG(MSG_SEND, "send_handler: evt->type = %d\n", evt->type);
          DEBG(MSG_SEND, "send_handler: Fell down into default\n");
          print_event(evt, t);
          exit(1);
          break;

      } /* switch */

      /* NEW */
      /* Did not process the event this time around */
      /* so leave it in the buffer/queue to be processed later */
      if (event_processed) {
        /* num_events++; */
        send_handler_evts_done++;
        ++lecb->head[buf_index];
        DEBG(MSG_SEND,
          "send_handler: event processed head for %d now = %d\n",
          buf_index, lecb->head[buf_index]);
        /* evt->type= (u32) - 1; */
        send_need_to_check_events = 0;
      } else {
        DEBG(MSG_SEND,
          "send_handler: event not processed head for %d still = %d\n",
          buf_index, lecb->head[buf_index]);
        PRINT_TIME(NOFD, &tnow, &tprev,
          "send_handler: event not processed head for %d still = %d\n",
          buf_index, lecb->head[buf_index]);
        printf("send_handler: event not processed head for %d still = %d\n",
          buf_index, lecb->head[buf_index]);
        send_need_to_check_events = 1;
        printf("send_handler: if EVT_IPACCEPT fd = %d\n", evt->data.ipa.fd);
        printf("send_handler: if EVT_IOREADY fd = %d\n", evt->data.io.fd);
        printf("send_handler: calling exit\n");
        print_event(evt, t);
        exit(1);
      }
      DEBG(MSG_SEND, "send_handler: send_need_to_check_events = %d\n",
        send_need_to_check_events);
      PRINT_TIME(NOFD, &tnow, &tprev,
        "send_handler: at bottom of loop buf_index = %d "
        "head = %d tail = %d",
        buf_index, ecb->head[buf_index], ecb->tail[buf_index]);
    } /* while */

  } /* while */

  if (consecutive > num_max_consecutive_accepts) {
    num_max_consecutive_accepts = consecutive;
  }

get_out:

  if (consecutive > 0) {
    if (options.use_memcpy) {
      memcpy(readable, rdfds, sizeof(fd_set));
      memcpy(writable, wrfds, sizeof(fd_set));
    } else {
      *readable = *rdfds;
      *writable = *wrfds;
    }

    if (options.process_sds_order == OPT_PROCESS_SDS_LIFO) {
      q_sync(Q_ADD_TO_FRONT);
    } else if (options.process_sds_order == OPT_PROCESS_SDS_FIFO) {
      q_sync(Q_ADD_TO_REAR);
    }
  }

  /* Changed to allow handler to be called explicitly. */
  lecb->regs = NULL;

  if (t != bogus_regs) {
    if (t->reason > 0 && t->eax < 0) {
      DEBG(MSG_SEND, "send_handler: Syscall failure: %i\n", t->reason);
      dump_regs(t);
      dump_stack(t);
      exit(1);
    }
    send_errno = (int) t->eax;
  } else {
    send_errno = 0;
  }

  /* DEBG(MSG_SEND, "send_handler: About to do restore\n"); */
  DEBG(MSG_SEND, "send_handler: send_errno = %d\n", send_errno);
  DEBG(MSG_SEND, "send_handler: save_errno = %d\n", save_errno);
  DEBG(MSG_SEND, "send_handler: send_need_to_check_events = %d\n",
    send_need_to_check_events);

  PRINT_TIME(NOFD, &tnow, &tprev,
    "send_handler: consecutive = %d evts_done = %d",
    consecutive, send_handler_evts_done);

  /* This was used for debugging */
  /* memcpy(&old_regs, t, sizeof(old_regs)); */
  /* old_regs_addr = t; */
  errno = save_errno;
  if (t != bogus_regs) {
    PRINT_TIME(NOFD, &tnow, &tprev, "restore_thread: eip = %p", t->eip);
    PRINT_TIME(NOFD, &tnow, &tprev, "restore_thread: flags = %p", t->flags);

    TRACE(EVT_SENDHANDLER, trace_fd = 0; rc = send_handler_evts_done;);
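
A minimal, self-contained sketch of the head/tail queue discipline the handler above follows: the consumer advances head only once an event has actually been processed, so an unprocessed event stays queued for a later pass. All names below are illustrative, not from the original program.

#include <stdio.h>

#define QSIZE 8

struct equeue { int buf[QSIZE]; int head; int tail; };

static int try_process(int evt)
{
    return evt >= 0;   /* stand-in for real event handling */
}

int main(void)
{
    struct equeue q = { { 11, 22, 33 }, 0, 3 };

    while (q.head != q.tail) {
        int evt = q.buf[q.head % QSIZE];
        if (!try_process(evt))
            break;     /* leave it queued for a later pass */
        q.head++;      /* consume only after processing    */
    }
    printf("processed up to head=%d\n", q.head);
    return 0;
}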
Example #15
0
ssize_t rawfs_reg_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
    struct file *filp = iocb->ki_filp;
    struct super_block *sb = filp->f_path.dentry->d_sb;
    struct rawfs_sb_info *rawfs_sb = RAWFS_SB(sb);
//    struct address_space *mapping=filp->f_mapping;
    struct inode *inode = filp->f_mapping->host;
    struct rawfs_inode_info *inode_info = RAWFS_I(inode);

    ssize_t retval;
//	size_t count;
	loff_t *ppos = &iocb->ki_pos;
    struct rawfs_file_info *fi = NULL;
    unsigned int curr_file_pos = pos;
    unsigned int curr_buf_pos = 0;
    int starting_page, total_pages;
    int remain_buf_size;
    int result;
    struct rawfs_file_list_entry * entry;
    const struct iovec *iv = &iov[0];  // TODO: Process all io vectors

    // Always use direct I/O
    //	loff_t size;
    //  int block_no;

    // Get Lock
    mutex_lock(&rawfs_sb->rawfs_lock);

    retval = iov_length(iov, nr_segs);

    RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_reg_file_aio_write %s, pos %lld, "
        "len %zd\n", inode_info->i_name, pos, retval);

    fi = kzalloc(sizeof(struct rawfs_file_info), GFP_NOFS);

    if (!fi) {
        retval = 0;
        goto out;
    }

    /* rawfs_fill_file_info(inode , fi); */
    rawfs_fill_fileinfo_by_dentry(filp->f_path.dentry, fi);

    /* Update file_info */
    if ((pos+retval) > fi->i_size)
        fi->i_size = pos+retval;

    fi->i_chunk_index = 1;
    fi->i_chunk_total = S_ISDIR(fi->i_mode) ? 1 : CEILING((unsigned)fi->i_size,
        rawfs_sb->page_data_size);

    /* do GC, if the required space is not enough */
    result = rawfs_reserve_space(sb, fi->i_chunk_total);
    if (result < 0) {
        retval = result;
        goto out;
    }

    // get entry from file list
    entry = rawfs_file_list_get(sb, fi->i_name, fi->i_parent_folder_id);

    if (!entry) {
        RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_reg_file_aio_write: %s file list "
            "entry missing\n", fi->i_name);
        dump_stack();
        retval = -ENOENT;  /* fail the write rather than report success */
        goto out;
    }

    // Write File
    {
        int preceding_pages, rear_pages;
        struct rawfs_page *page_buf = NULL;
        int i;

        // Prepare page buffer
        page_buf = kzalloc(rawfs_sb->page_size, GFP_NOFS);

        if (page_buf == NULL) {
            retval = 0;
            goto out;
        }

        starting_page = rawfs_sb->data_block_free_page_index;
        preceding_pages = FLOOR((unsigned)pos, rawfs_sb->page_data_size);
        total_pages = CEILING((unsigned)fi->i_size, rawfs_sb->page_data_size);
        rear_pages = CEILING((unsigned)pos + iv->iov_len,
            rawfs_sb->page_data_size);
        remain_buf_size = iv->iov_len;

        RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_reg_file_aio_write %s, "
            "preceding_pages %d, rear_pages %d, total_pages %d, "
            "remain_buf_size %d\n",
             inode_info->i_name, preceding_pages, rear_pages, total_pages,
             remain_buf_size);


        // Step 1: Copy preceding pages, if starting pos is not 0.
        for (i = 0; i < total_pages; i++)
        {
            // Read, if necessary (no need for new files)
            if ((i <= preceding_pages) || (i >= rear_pages))
                rawfs_sb->dev.read_page(sb,
                    entry->i_location_block,
                    entry->i_location_page + i,
                    page_buf);

            // Update info
            memcpy(&page_buf->i_info.i_file_info, fi,
                sizeof(struct rawfs_file_info));

            // Within Modify Range: Copy Modify
            if ((i>=preceding_pages) && (i<=rear_pages))
            {
                int start_in_buf;
                int copy_len;

                start_in_buf = (curr_file_pos % rawfs_sb->page_data_size);
                copy_len =  ((start_in_buf + remain_buf_size) >
                            rawfs_sb->page_data_size) ?
                            (rawfs_sb->page_data_size - start_in_buf) :
                            remain_buf_size;

                if (copy_from_user(&page_buf->i_data[0] + start_in_buf,
                    (char*)iv->iov_base + curr_buf_pos, copy_len))
                {
                    retval = -EFAULT;
                    goto out2;
                }

                curr_buf_pos    += copy_len;
                remain_buf_size -= copy_len;

                RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_reg_file_aio_write %s, %d, "
                    "curr_buf_pos %d, remain_buf_size %d start_in_buf %d "
                    "copy_len %d starting pattern %X\n",
                inode_info->i_name, i, curr_buf_pos, remain_buf_size,
                start_in_buf, copy_len,
                *(unsigned int*)(&page_buf->i_data[0] + start_in_buf));
            }

            rawfs_page_signature(sb, page_buf);

            // Write
            rawfs_sb->dev.write_page(sb,
                rawfs_sb->data_block,
                rawfs_sb->data_block_free_page_index,
                page_buf);

            rawfs_sb->data_block_free_page_index++;
            fi->i_chunk_index++;
        }

out2:
        if (page_buf)
            kfree(page_buf);
    }

	if (retval > 0) {
		*ppos = pos + retval;
	}

    file_accessed(filp);

    // Update Inode: file size, block, page
    i_size_write(inode, fi->i_size);
    // TODO: Get inode lock when update
    inode_info->i_location_block = rawfs_sb->data_block;
    inode_info->i_location_page = starting_page;
    inode_info->i_location_page_count = total_pages;

    // update location
    entry->i_location_block = rawfs_sb->data_block;
    entry->i_location_page = starting_page;
    entry->i_location_page_count = total_pages;

out:
    if (fi)
        kfree(fi);

    // Release Lock
    mutex_unlock(&rawfs_sb->rawfs_lock);

    return retval;
}
Example #16
0
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
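
The dispatch loop above peels pending IRQs off a bitmask one set bit at a time. Here is the same pattern as a standalone sketch, with GCC's __builtin_ctzl standing in for the kernel's __ffs:

#include <stdio.h>

static void handle_irq(unsigned long irq)
{
    printf("handling irq %lu\n", irq);
}

int main(void)
{
    unsigned long pending = 0x29;   /* bits 0, 3 and 5 set */

    while (pending) {
        unsigned long irq = __builtin_ctzl(pending); /* lowest set bit */
        pending &= ~(1UL << irq);                    /* clear it       */
        handle_irq(irq);                             /* dispatch       */
    }
    return 0;
}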
Example #17
0
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* test for hardlockups on the next cpu */
	watchdog_check_hardlockup_other_cpu();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
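
The soft_lockup_nmi_warn bit above gates the all-CPU backtrace so that only one CPU reports at a time. A portable C11 sketch of that test-and-set gate, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag nmi_warn = ATOMIC_FLAG_INIT;

static void report_lockup(int cpu)
{
    if (atomic_flag_test_and_set(&nmi_warn)) {
        printf("cpu%d: report already in progress, backing off\n", cpu);
        return;
    }
    printf("cpu%d: dumping all-cpu backtraces\n", cpu);
    atomic_flag_clear(&nmi_warn);    /* re-arm for the next incident */
}

int main(void)
{
    report_lockup(0);                     /* wins the flag and reports   */
    atomic_flag_test_and_set(&nmi_warn);  /* pretend another CPU owns it */
    report_lockup(1);                     /* sees it set and backs off   */
    return 0;
}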
Example #18
0
int main(int argc, char *argv[])
{
    cpu_t *cpu = cpu_create();
    uint8_t opcode = 0;  /* initialized in case the CPU halts before the first step */

    FILE *file = NULL;
    char *dump_ram = NULL;
    char *dump_flash = NULL;
    char **buffer = NULL;
    int i;

    if(argc != 2 && argc != 4 && argc != 6)
    {
        puts("usage: fsim <in> [--dumpram|-r <ram filename>] [--dumpflash|-f <flash filename>]");
        return EXIT_SUCCESS;
    }
    for (i = 2; i < argc; i++)
    {
        if (memcmp("--dumpram", argv[i], 9) == 0 || memcmp("-r", argv[i], 2) == 0)
        {
            buffer = &dump_ram;
        }
        else if (memcmp("--dumpflash", argv[i], 11) == 0 || memcmp("-f", argv[i], 2) == 0)
        {
            buffer = &dump_flash;
        }
        else if (buffer)
        {
            *buffer = argv[i];
            buffer = NULL;
        }
        else
        {
            printf("unknown option \"%s\"\n", argv[i]);
            return EXIT_FAILURE;
        }
    }
    if (buffer)
    {
         puts("Not all options set");
         return EXIT_FAILURE;
    }

    file = fopen(argv[1], "r");

    if(!file)
    {
        printf("could not open file \"%s\"",argv[1]);
        return EXIT_FAILURE;
    }

    fread(cpu->flash, sizeof(cpu->flash), 1, file);

    fclose(file);
    file = NULL;

    printf("First 160 bytes of flash:");
    for(i = 0;i < 160; i++) {
        if (i % 16 == 0)
        {
            printf("\n%08x:", i + 0x01000000);
        }
        if (i % 2 == 0)
        {
            printf(" ");
        }
        printf("%02x", cpu->flash[i]);
    }
    printf("\n\n");

    while(!cpu->status)
    {
        opcode = cpu_step(cpu);
    }
    switch (cpu->status)
    {
        case 2:
            printf("Illegal opcode \"%02x\"\n", opcode);
            break;
        case 1:
            puts("");
            puts("####################");
            puts("#        ##        #");
            puts("#### Halted CPU ####");
            puts("#        ##        #");
            puts("####################");
            puts("");
            break;
        default:
            printf("Unknown exit status %d", cpu->status);
    }

    puts("Register Dump:");
    printf("A  = %08x\n", cpu->a);
    printf("X  = %08x\n", cpu->x);
    printf("PC = %08x\n", cpu->pc);
    printf("SP = %08x\n", cpu->sp);
    printf("z  = %01d\n", cpu->z);
    printf("n  = %01d\n", cpu->n);
    printf("i  = %01d\n", cpu->i);
    printf("if = %02x\n", cpu->interrupt_flags);
    printf("iv = %08x\n", cpu->interrupt_vector);
    puts("");
    dump_stack(cpu, 10);

    if (dump_flash)
    {
        file = fopen(dump_flash, "w");
        if(!file)
        {
            printf("could not open flash file \"%s\"", dump_flash);
        } else {
            fwrite(cpu->flash, sizeof(cpu->flash), 1, file);
            fclose(file);
            printf("Dumped flash to \"%s\"\n", dump_flash);
        }
    }

    if (dump_ram)
    {
        file = fopen(dump_ram, "w");
        if(!file)
        {
            printf("could not open ram file \"%s\"", dump_ram);
        } else {
            fwrite(cpu->ram, sizeof(cpu->ram), 1, file);
            fclose(file);
            printf("Dumped ram to \"%s\"\n", dump_ram);
        }
    }
    cpu = cpu_free(cpu);

    return 0;
}
Example #19
0
/* time is about to run out and the scu will reset soon.  quickly
 * dump debug data to logbuffer and emmc via calling panic before lights
 * go out.
 */
static void smp_dumpstack(void *info)
{
	dump_stack();
}
Example #20
0
/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
 *
 * Check for an EEH failure for the given device node.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze.  This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dev_check_failure(struct eeh_dev *edev)
{
	int ret;
	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	unsigned long flags;
	struct pci_dn *pdn;
	struct pci_dev *dev;
	struct eeh_pe *pe, *parent_pe, *phb_pe;
	int rc = 0;
	const char *location = NULL;

	eeh_stats.total_mmio_ffs++;

	if (!eeh_enabled())
		return 0;

	if (!edev) {
		eeh_stats.no_dn++;
		return 0;
	}
	dev = eeh_dev_to_pci_dev(edev);
	pe = eeh_dev_to_pe(edev);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!pe) {
		eeh_stats.ignored_check++;
		pr_debug("EEH: Ignored check for %s\n",
			eeh_pci_name(dev));
		return 0;
	}

	if (!pe->addr && !pe->config_addr) {
		eeh_stats.no_cfg_addr++;
		return 0;
	}

	/*
	 * On PowerNV platform, we might already have fenced PHB
	 * there and we need take care of that firstly.
	 */
	ret = eeh_phb_check_failure(pe);
	if (ret > 0)
		return ret;

	/*
	 * If the PE isn't owned by us, we shouldn't check the
	 * state. Instead, let the owner handle it if the PE has
	 * been frozen.
	 */
	if (eeh_pe_passed(pe))
		return 0;

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	eeh_serialize_lock(&flags);
	rc = 1;
	if (pe->state & EEH_PE_ISOLATED) {
		pe->check_count++;
		if (pe->check_count % EEH_MAX_FAILS == 0) {
			pdn = eeh_dev_to_pdn(edev);
			if (pdn->node)
				location = of_get_property(pdn->node, "ibm,loc-code", NULL);
			printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
				"location=%s driver=%s pci addr=%s\n",
				pe->check_count,
				location ? location : "unknown",
				eeh_driver_name(dev), eeh_pci_name(dev));
			printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
				eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = eeh_ops->get_state(pe, NULL);

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children.
	 * We will punt with the following conditions: Failure to get
	 * PE's state, EEH not support and Permanently unavailable
	 * state, PE is in good state.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
	    ((ret & active_flags) == active_flags)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/*
	 * It should be corner case that the parent PE has been
	 * put into frozen state as well. We should take care
	 * that at first.
	 */
	parent
Example #21
0
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id   = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
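
The "skb rides the rocket" check above estimates ring slots as the paged fragments plus however many pages the linear head straddles. A standalone sketch of that arithmetic; the constants are illustrative, not the driver's real limits.

#include <stdio.h>

#define PAGE_SIZE          4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_SKB_FRAGS      17u   /* illustrative limit */

int main(void)
{
    unsigned nr_frags = 3;     /* paged fragments             */
    unsigned offset   = 4000;  /* head offset within its page */
    unsigned len      = 6000;  /* linear head length          */

    unsigned slots = nr_frags + DIV_ROUND_UP(offset + len, PAGE_SIZE);

    if (slots > MAX_SKB_FRAGS + 1)
        printf("drop: %u slots exceed ring capacity\n", slots);
    else
        printf("ok: %u slots\n", slots);
    return 0;
}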
Example #22
0
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
Example #23
0
/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer where to store the read data
 * @pnum: physical eraseblock number to read from
 * @offset: offset within the physical eraseblock from where to read
 * @len: how many bytes to read
 *
 * This function reads data from offset @offset of physical eraseblock @pnum
 * and stores the read data in the @buf buffer. The following return codes are
 * possible:
 *
 * o %0 if all the requested data were successfully read;
 * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
 *   correctable bit-flips were detected; this is harmless but may indicate
 *   that this eraseblock may become bad soon (but do not have to);
 * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
 *   example it can be an ECC error in case of NAND; this most probably means
 *   that the data is corrupted;
 * o %-EIO if some I/O error occurred;
 * o other negative error codes in case of other errors.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = self_check_not_bad(ubi, pnum);
	if (err)
		return err;

	/*
	 * Deliberately corrupt the buffer to improve robustness. Indeed, if we
	 * do not do this, the following may happen:
	 * 1. The buffer contains data from previous operation, e.g., read from
	 *    another PEB previously. The data looks like expected, e.g., if we
	 *    just do not read anything and return - the caller would not
	 *    notice this. E.g., if we are reading a VID header, the buffer may
	 *    contain a valid VID header from another PEB.
	 * 2. The driver is buggy and returns us success or -EBADMSG or
	 *    -EUCLEAN, but it does not actually put any data to the buffer.
	 *
	 * This may confuse UBI or upper layers - they may think the buffer
	 * contains valid data while in fact it is just old data. This is
	 * especially possible because UBI (and UBIFS) relies on CRC, and
	 * treats data as correct even in case of ECC errors if the CRC is
	 * correct.
	 *
	 * Try to prevent this situation by changing the first byte of the
	 * buffer.
	 */
	*((uint8_t *)buf) ^= 0xFF;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";

		if (mtd_is_bitflip(err)) {
			/*
			 * -EUCLEAN is reported if there was a bit-flip which
			 * was corrected, so this is harmless.
			 *
			 * We do not report about it here unless debugging is
			 * enabled. A corresponding message will be printed
			 * later, when it has been scrubbed.
			 */
			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
				 err, errstr, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
			err, errstr, len, pnum, offset, read);
		dump_stack();

		/*
		 * The driver should never return -EBADMSG if it failed to read
		 * all the requested data. But some buggy drivers might do
		 * this, so we change it to -EIO.
		 */
		if (read != len && mtd_is_eccerr(err)) {
			ubi_assert(0);
			err = -EIO;
		}
	} else {
		ubi_assert(len == read);

		if (ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}
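
The "*((uint8_t *)buf) ^= 0xFF" line above deliberately poisons the buffer, so a buggy driver that reports success without writing anything cannot leave plausible stale data behind. A toy illustration of the same defensive trick:

#include <stdint.h>
#include <stdio.h>

static int fake_read(void *dst, size_t len)
{
    (void)dst; (void)len;   /* a buggy driver: "succeeds" without writing */
    return 0;
}

int main(void)
{
    uint8_t buf[16] = "stale old data";

    buf[0] ^= 0xFF;                /* poison before reading             */
    fake_read(buf, sizeof(buf));   /* driver forgets to fill the buffer */

    /* the flipped first byte makes the stale content detectable */
    printf("first byte: 0x%02x\n", buf[0]);
    return 0;
}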
Example #24
0
File: util.c Project: 19Dan01/linux
void sighandler_dump_stack(int sig)
{
	psignal(sig, "perf");
	dump_stack();
	exit(sig);
}
Example #25
0
/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum and returns
 * zero in case of success and a negative error code in case of failure. If
 * %-EIO is returned, the physical eraseblock most probably went bad.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

retry:
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd      = ubi->mtd;
	ei.addr     = (loff_t)pnum * ubi->peb_size;
	ei.len      = ubi->peb_size;
	ei.callback = erase_callback;
	ei.priv     = (unsigned long)&wq;

	err = mtd_erase(ubi->mtd, &ei);
	atomic_inc(&ubi->ec_count); /* MTK */
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error %d while erasing PEB %d, retry",
				 err, pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d, error %d", pnum, err);
		dump_stack();
		return err;
	}

	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err("interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			ubi_warn("error while erasing PEB %d, retry", pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d", pnum);
		dump_stack();
		return -EIO;
	}

	err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err;

	if (ubi_dbg_is_erase_failure(ubi)) {
		ubi_err("cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}
Example #26
0
static int android_oom_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	struct task_struct *tsk;
#ifdef MULTIPLE_OOM_KILLER
	struct task_struct *selected[OOM_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef MULTIPLE_OOM_KILLER
	int selected_tasksize[OOM_DEPTH] = {0,};
	int selected_oom_score_adj[OOM_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);

	unsigned long *freed = data;

	/* show status */
	pr_warning("%s invoked Android-oom-killer: "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, current->signal->oom_adj,
		current->signal->oom_score_adj);
	dump_stack();
	show_mem(SHOW_MEM_FILTER_NODES);
	if (__ratelimit(&oom_rs))
		dump_tasks_info();

	min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef MULTIPLE_OOM_KILLER
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

		lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
		if (all_selected_oom < OOM_DEPTH) {
			for (i = 0; i < OOM_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < OOM_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == OOM_DEPTH) {
				for (i = 0; i < OOM_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "oom: max_selected_oom_idx(%d) select %d (%s), adj %d, \
					size %d, to kill\n",
				max_selected_oom_idx, p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d,\
				     size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i]);
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
			*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
			oom_count++;
#endif

		}
	}
#else
	if (selected) {
		lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
		oom_count++;
#endif
	}
#endif
	read_unlock(&tasklist_lock);

	lowmem_print(2, "oom: get memory %lu", *freed);
	return rem;
}
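
The single-victim branch above keeps the task with the highest oom_score_adj and breaks ties by the larger RSS. The same selection rule as a standalone sketch over made-up tasks:

#include <stdio.h>

struct task { const char *name; int score; int size; };

int main(void)
{
    struct task tasks[] = {
        { "a", 500, 100 }, { "b", 900, 50 }, { "c", 900, 80 },
    };
    struct task *victim = NULL;

    for (int i = 0; i < 3; i++) {
        struct task *t = &tasks[i];
        if (victim) {
            if (t->score < victim->score)
                continue;
            if (t->score == victim->score && t->size <= victim->size)
                continue;
        }
        victim = t;
    }
    printf("kill %s\n", victim->name);   /* "c": same score as "b", bigger */
    return 0;
}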
Example #27
0
/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks node magic number and CRC checksum. This function also
 * validates node length to prevent UBIFS from becoming crazy when an attacker
 * feeds it a file-system image with incorrect nodes. For example, too large
 * node length in the common header could cause UBIFS to read memory outside of
 * allocated buffer when checking the CRC checksum.
 *
 * This function may skip data nodes CRC checking if @c->no_chk_data_crc is
 * true, which is controlled by corresponding UBIFS mount option. However, if
 * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
 * checked. Similarly, if @c->mounting or @c->remounting_rw is true (we are
 * mounting or re-mounting to R/W mode), @c->no_chk_data_crc is ignored and CRC
 * is checked. This is because during mounting or re-mounting from R/O mode to
 * R/W mode we may read journal nodes (when replaying the journal or doing the
 * recovery) and the journal nodes may potentially be corrupted, so checking is
 * required.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of bad
 * CRC or magic.
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err("bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err("bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err("bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err("bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err("bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return err;
}
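
ubifs_check_node computes the CRC over everything past the first 8 bytes of the common header (magic plus the stored CRC itself) and compares it against the stored value. The sketch below mirrors that layout; the bitwise CRC-32 is only illustrative, whereas UBIFS uses the kernel's crc32() with UBIFS_CRC32_INIT and little-endian on-media fields.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_sketch(const uint8_t *p, size_t len)
{
    uint32_t crc = 0xFFFFFFFFu;
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    }
    return ~crc;
}

int main(void)
{
    uint8_t node[32] = { 0 };
    uint32_t crc, stored;

    memcpy(node, "UBI#", 4);                          /* magic                   */
    memcpy(node + 8, "payload-payload-payload!", 24); /* body after the header   */

    crc = crc32_sketch(node + 8, sizeof(node) - 8);
    memcpy(node + 4, &crc, 4);                        /* store CRC (host endian) */

    memcpy(&stored, node + 4, 4);
    puts(crc32_sketch(node + 8, sizeof(node) - 8) == stored
         ? "CRC ok" : "bad CRC");
    return 0;
}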
Example #28
0
/*===========================================================================
METHOD:
   QCUSBNetStartXmit (Public Method)

DESCRIPTION:
   Convert sk_buff to usb URB and queue for transmit

PARAMETERS
   pNet     [ I ] - Pointer to net device

RETURN VALUE:
   NETDEV_TX_OK on success
   NETDEV_TX_BUSY on error
===========================================================================*/
int QCUSBNetStartXmit( 
   struct sk_buff *     pSKB,
   struct net_device *  pNet )
{
   unsigned long URBListFlags;
   struct sQCUSBNet * pQCDev;
   sAutoPM * pAutoPM;
   sURBList * pURBListEntry, ** ppURBListEnd;
   void * pURBData;
   struct usbnet * pDev = netdev_priv( pNet );
   
   DBG( "\n" );
   
   if (pDev == NULL || pDev->net == NULL)
   {
      DBG( "failed to get usbnet device\n" );
      return NETDEV_TX_BUSY;
   }
   
   pQCDev = (sQCUSBNet *)pDev->data[0];
   if (pQCDev == NULL)
   {
      DBG( "failed to get QMIDevice\n" );
      return NETDEV_TX_BUSY;
   }
   pAutoPM = &pQCDev->mAutoPM;
   
   if (QTestDownReason( pQCDev, DRIVER_SUSPENDED ) == true)
   {
      // Should not happen
      DBG( "device is suspended\n" );
      dump_stack();
      return NETDEV_TX_BUSY;
   }
   
   // Convert the sk_buff into a URB

   // Allocate URBListEntry
   pURBListEntry = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
   if (pURBListEntry == NULL)
   {
      DBG( "unable to allocate URBList memory\n" );
      return NETDEV_TX_BUSY;
   }
   pURBListEntry->mpNext = NULL;

   // Allocate URB
   pURBListEntry->mpURB = usb_alloc_urb( 0, GFP_ATOMIC );
   if (pURBListEntry->mpURB == NULL)
   {
      DBG( "unable to allocate URB\n" );
      kfree( pURBListEntry );   // don't leak the list entry
      return NETDEV_TX_BUSY;
   }

   // Allocate URB transfer_buffer
   pURBData = kmalloc( pSKB->len, GFP_ATOMIC );
   if (pURBData == NULL)
   {
      DBG( "unable to allocate URB data\n" );
      usb_free_urb( pURBListEntry->mpURB );   // don't leak the URB
      kfree( pURBListEntry );                 // or the list entry
      return NETDEV_TX_BUSY;
   }
   // Fill with the SKB's data
   memcpy( pURBData, pSKB->data, pSKB->len );

   usb_fill_bulk_urb( pURBListEntry->mpURB,
                      pQCDev->mpNetDev->udev,
                      pQCDev->mpNetDev->out,
                      pURBData,
                      pSKB->len,
                      QCUSBNetURBCallback,
                      pAutoPM );
   
   // Acquire lock on URBList
   spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
   
   // Add URB to end of list
   ppURBListEnd = &pAutoPM->mpURBList;
   while ((*ppURBListEnd) != NULL)
   {
      ppURBListEnd = &(*ppURBListEnd)->mpNext;
   }
   *ppURBListEnd = pURBListEntry;

   spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );

   complete( &pAutoPM->mThreadDoWork );

   // Start transfer timer
   pNet->trans_start = jiffies;
   // Free SKB
   dev_kfree_skb_any( pSKB );

   return NETDEV_TX_OK;
}
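
The ppURBListEnd walk above is the classic pointer-to-pointer tail append: by advancing a pointer to the next-pointer itself, the empty list needs no special case. The same idiom in isolation:

#include <stdio.h>

struct node { int val; struct node *next; };

static void append(struct node **head, struct node *n)
{
    struct node **end = head;
    while (*end)              /* walk to the terminating NULL */
        end = &(*end)->next;
    *end = n;                 /* works for empty and non-empty lists */
}

int main(void)
{
    struct node *list = NULL;
    struct node a = { 1, NULL }, b = { 2, NULL };

    append(&list, &a);
    append(&list, &b);

    for (struct node *p = list; p; p = p->next)
        printf("%d\n", p->val);
    return 0;
}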
Example #29
0
/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes data from the buffer, otherwise it reads the flash media.
 * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
 * error code in case of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err("bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf);
	dump_stack();
	return -EINVAL;
}
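
The overlap path above stitches a node together from two sources: bytes below wbuf->offs are read from the media, and the overlapping tail is memcpy'd out of the RAM write-buffer. A standalone sketch of that split, with illustrative offsets:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char media[33] = "0123456789abcdefXXXXXXXXXXXXXXXX"; /* on-flash bytes */
    char wbuf[17]  = "ghijklmnopqrstuv";  /* cached, covers offsets 16..31 */
    int  wbuf_offs = 16;

    int  offs = 10, len = 12;             /* request spans both sources    */
    char out[13] = { 0 };

    int rlen = wbuf_offs - offs;          /* bytes to take from the media  */
    if (rlen < 0)
        rlen = 0;

    memcpy(out, media + offs, rlen);                                  /* media */
    memcpy(out + rlen, wbuf + (offs + rlen - wbuf_offs), len - rlen); /* wbuf  */

    printf("%s\n", out);   /* prints "abcdefghijkl" */
    return 0;
}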
/**
 * of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
 * @np:		device node to get GPIO from
 * @propname:	property name containing gpio specifier(s)
 * @index:	index of the GPIO
 * @flags:	a flags pointer to fill in
 *
 * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
 * value on the error condition. If @flags is not NULL the function also fills
 * in flags for the GPIO.
 */
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
			   int index, enum of_gpio_flags *flags)
{
	/* Return -EPROBE_DEFER to support probe() functions to be called
	 * later when the GPIO actually becomes available
	 */
	struct gg_data gg_data = { .flags = flags, .out_gpio = -EPROBE_DEFER };
	int ret;

	/* .of_xlate might decide to not fill in the flags, so clear it. */
	if (flags)
		*flags = 0;

	ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
					 &gg_data.gpiospec);
	if (ret) {
		pr_debug("%s: can't parse gpios property\n", __func__);
#if !defined(CONFIG_SAMSUNG_PRODUCT_SHIP)
		dump_stack();
#endif
		return ret;
	}

	gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);

	of_node_put(gg_data.gpiospec.np);
	pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
	return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);

/**
 * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
 * @gc:		pointer to the gpio_chip structure
 * @np:		device node of the GPIO chip
 * @gpio_spec:	gpio specifier as found in the device tree
 * @flags:	a flags pointer to fill in
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * gpio chips. This function performs only one sanity check: whether gpio
 * is less than ngpios (that is specified in the gpio_chip).
 */
int of_gpio_simple_xlate(struct gpio_chip *gc,
			 const struct of_phandle_args *gpiospec, u32 *flags)
{
	/*
	 * We're discouraging gpio_cells < 2, since that way you'll have to
 * write your own xlate function (that will have to retrieve the GPIO
	 * number and the flags from a single gpio cell -- this is possible,
	 * but not recommended).
	 */
	if (gc->of_gpio_n_cells < 2) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
		return -EINVAL;

	if (gpiospec->args[0] >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);

/**
 * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
 * @np:		device node of the GPIO chip
 * @mm_gc:	pointer to the of_mm_gpio_chip allocated structure
 *
 * To use this function you should allocate and fill mm_gc with:
 *
 * 1) In the gpio_chip structure:
 *    - all the callbacks
 *    - of_gpio_n_cells
 *    - of_xlate callback (optional)
 *
 * 2) In the of_mm_gpio_chip structure:
 *    - save_regs callback (optional)
 *
 * If it succeeds, this function will map the bank's memory and
 * do all the necessary work for you. Then you'll be able to use .regs
 * to manage GPIOs from the callbacks.
 */
int of_mm_gpiochip_add(struct device_node *np,
		       struct of_mm_gpio_chip *mm_gc)
{
	int ret = -ENOMEM;
	struct gpio_chip *gc = &mm_gc->gc;

	gc->label = kstrdup(np->full_name, GFP_KERNEL);
	if (!gc->label)
		goto err0;

	mm_gc->regs = of_iomap(np, 0);
	if (!mm_gc->regs)
		goto err1;

	gc->base = -1;

	if (mm_gc->save_regs)
		mm_gc->save_regs(mm_gc);

	mm_gc->gc.of_node = np;

	ret = gpiochip_add(gc);
	if (ret)
		goto err2;

	return 0;
err2:
	iounmap(mm_gc->regs);
err1:
	kfree(gc->label);
err0:
	pr_err("%s: GPIO chip registration failed with status %d\n",
	       np->full_name, ret);
	return ret;
}