Example #1
int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
		   unsigned int pending)
{
	int err;
	aufs_bindex_t bindex, bend;
	unsigned char do_plink, skip, do_free;
	struct au_branch *br;
	struct au_wbr *wbr;
	struct dentry *root;
	struct inode *dir, *h_dir;
	struct au_sbinfo *sbinfo;
	struct au_hinode *hdir;

	SiMustAnyLock(sb);

	sbinfo = au_sbi(sb);
	AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));

	if (!(sb_flags & MS_RDONLY)) {
		if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
			pr_warning("first branch should be rw\n");
		if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
			pr_warning("shwh should be used with ro\n");
	}

	if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
	    && !au_opt_test(sbinfo->si_mntflags, XINO))
		pr_warning("udba=*notify requires xino\n");

	err = 0;
	root = sb->s_root;
	dir = root->d_inode;
	do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
	bend = au_sbend(sb);
	for (bindex = 0; !err && bindex <= bend; bindex++) {
		skip = 0;
		h_dir = au_h_iptr(dir, bindex);
		br = au_sbr(sb, bindex);
		do_free = 0;

		wbr = br->br_wbr;
		if (wbr)
			wbr_wh_read_lock(wbr);

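		/*
		 * Decide, per branch permission, whether the whiteout
		 * bookkeeping (whbase, plink, orph) already matches the
		 * permission, in which case this branch is skipped below.
		 */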
		switch (br->br_perm) {
		case AuBrPerm_RO:
		case AuBrPerm_ROWH:
		case AuBrPerm_RR:
		case AuBrPerm_RRWH:
			do_free = !!wbr;
			skip = (!wbr
				|| (!wbr->wbr_whbase
				    && !wbr->wbr_plink
				    && !wbr->wbr_orph));
			break;

		case AuBrPerm_RWNoLinkWH:
			/* skip = (!br->br_whbase && !br->br_orph); */
			skip = (!wbr || !wbr->wbr_whbase);
			if (skip && wbr) {
				if (do_plink)
					skip = !!wbr->wbr_plink;
				else
					skip = !wbr->wbr_plink;
			}
			break;

		case AuBrPerm_RW:
			/* skip = (br->br_whbase && br->br_orph); */
			skip = (wbr && wbr->wbr_whbase);
			if (skip) {
				if (do_plink)
					skip = !!wbr->wbr_plink;
				else
					skip = !wbr->wbr_plink;
			}
			break;

		default:
			BUG();
		}
		if (wbr)
			wbr_wh_read_unlock(wbr);

		if (skip)
			continue;

		hdir = au_hi(dir, bindex);
		au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
		if (wbr)
			wbr_wh_write_lock(wbr);
		err = au_wh_init(au_h_dptr(root, bindex), br, sb);
		if (wbr)
			wbr_wh_write_unlock(wbr);
		au_hn_imtx_unlock(hdir);

		if (!err && do_free) {
			kfree(wbr);
			br->br_wbr = NULL;
		}
	}

	return err;
}
static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

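	/*
	 * Atomically publish the new trace; if another context already
	 * installed one, put it back and report -EBUSY.
	 */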
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
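
/*
 * A minimal userspace sketch of the same name<->mask round-trip, assuming
 * a hypothetical two-entry table; strtok_r() stands in for the kernel's
 * strsep(), and the error convention (-EINVAL on an unknown name) matches
 * blk_trace_str2mask() above.
 */
#include <string.h>
#include <strings.h>

static const struct { int mask; const char *str; } demo_maps[] = {
	{ 0x1, "read"  },
	{ 0x2, "write" },
};

static int demo_str2mask(char *s)
{
	char *save, *tok;
	int i, mask = 0;

	for (tok = strtok_r(s, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		for (i = 0; i < 2; i++)
			if (strcasecmp(tok, demo_maps[i].str) == 0)
				break;
		if (i == 2)
			return -22;	/* unknown name: -EINVAL */
		mask |= demo_maps[i].mask;
	}
	return mask;
}
/* demo_str2mask("read,write") yields 0x3, mirroring the table lookup above. */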

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}
static __init void da830_evm_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	int ret;

	ret = da830_register_edma(da830_edma_rsv);
	if (ret)
		pr_warning("da830_evm_init: edma registration failed: %d\n",
				ret);

	ret = davinci_cfg_reg_list(da830_i2c0_pins);
	if (ret)
		pr_warning("da830_evm_init: i2c0 mux setup failed: %d\n",
				ret);

	ret = da8xx_register_i2c(0, &da830_evm_i2c_0_pdata);
	if (ret)
		pr_warning("da830_evm_init: i2c0 registration failed: %d\n",
				ret);

	da830_evm_usb_init();

	soc_info->emac_pdata->rmii_en = 1;
	soc_info->emac_pdata->phy_id = DA830_EVM_PHY_ID;

	ret = davinci_cfg_reg_list(da830_cpgmac_pins);
	if (ret)
		pr_warning("da830_evm_init: cpgmac mux setup failed: %d\n",
				ret);

	ret = da8xx_register_emac();
	if (ret)
		pr_warning("da830_evm_init: emac registration failed: %d\n",
				ret);

	ret = da8xx_register_watchdog();
	if (ret)
		pr_warning("da830_evm_init: watchdog registration failed: %d\n",
				ret);

	davinci_serial_init(&da830_evm_uart_config);
	i2c_register_board_info(1, da830_evm_i2c_devices,
			ARRAY_SIZE(da830_evm_i2c_devices));

	ret = davinci_cfg_reg_list(da830_evm_mcasp1_pins);
	if (ret)
		pr_warning("da830_evm_init: mcasp1 mux setup failed: %d\n",
				ret);

	da8xx_register_mcasp(1, &da830_evm_snd_data);

	da830_evm_init_mmc();

	ret = da8xx_register_rtc();
	if (ret)
		pr_warning("da830_evm_init: rtc setup failed: %d\n", ret);

	ret = da8xx_register_spi(0, da830evm_spi_info,
				 ARRAY_SIZE(da830evm_spi_info));
	if (ret)
		pr_warning("da830_evm_init: spi 0 registration failed: %d\n",
			   ret);
}
Example #5
/*
 * RX work takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
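
/*
 * A standalone sketch of the partial-push bookkeeping used above, assuming
 * a hypothetical sink() that may accept fewer bytes than offered (just as
 * tty_insert_flip_string() may). n_read records how much of the current
 * packet was already delivered, so the next pass resumes mid-packet.
 */
static unsigned demo_n_read;

static int demo_push_packet(const char *buf, unsigned len,
			    unsigned (*sink)(const char *, unsigned))
{
	const char *packet = buf + demo_n_read;
	unsigned size = len - demo_n_read;
	unsigned count = sink(packet, size);

	if (count != size) {
		demo_n_read += count;	/* resume here next time */
		return 0;		/* packet not finished */
	}
	demo_n_read = 0;		/* packet fully consumed */
	return 1;
}
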
/*
 * This thread processes interrupts reported by the Primary Interrupt Handler.
 */
static int twl6030_irq_thread(void *data)
{
	long irq = (long)data;
	static unsigned i2c_errors;
	static const unsigned max_i2c_errors = 100;
	int ret;

	current->flags |= PF_NOFREEZE;

	while (!kthread_should_stop()) {
		int i;
		union {
			u8 bytes[4];
			u32 int_sts;
		} sts;
		u32 int_sts; /* sts.int_sts converted to CPU endianness */

		/* Wait for IRQ, then read PIH irq status (also blocking) */
		wait_for_completion_interruptible(&irq_event);

		/* read INT_STS_A, B and C in one shot using a burst read */
		ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
				REG_INT_STS_A, 3);
		if (ret) {
			pr_warning("twl6030: I2C error %d reading PIH ISR\n",
					ret);
			if (++i2c_errors >= max_i2c_errors) {
				printk(KERN_ERR "Maximum I2C error count"
						" exceeded.  Terminating %s.\n",
						__func__);
				break;
			}
			complete(&irq_event);
			continue;
		}

		sts.bytes[3] = 0; /* only 24 bits are valid */

		/*
		 * Since VBUS status bit is not reliable for VBUS disconnect
		 * use CHARGER VBUS detection status bit instead.
		 */
		if (sts.bytes[2] & 0x10)
			sts.bytes[2] |= 0x08;

		int_sts = le32_to_cpu(sts.int_sts);
		for (i = 0; int_sts; int_sts >>= 1, i++) {
			local_irq_disable();
			if (int_sts & 0x1) {
				int module_irq = twl6030_irq_base +
					twl6030_interrupt_mapping[i];
				generic_handle_irq(module_irq);
			}
			local_irq_enable();
		}

		/*
		 * NOTE:
		 * Simulation confirms that documentation is wrong w.r.t the
		 * interrupt status clear operation. A single *byte* write to
		 * any one of STS_A to STS_C register results in all three
		 * STS registers being reset. Since it does not matter which
		 * value is written, all three registers are cleared on a
		 * single byte write, so we just use 0x0 to clear.
		 */
		ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A);
		if (ret)
			pr_warning("twl6030: I2C error in clearing PIH ISR\n");

		enable_irq(irq);
	}

	return 0;
}
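
/*
 * The dispatch loop above walks the 24-bit status word one bit at a time.
 * A compact sketch of the same walk, with a hypothetical handler table in
 * place of generic_handle_irq():
 */
static void demo_dispatch(unsigned int status, void (*handlers[])(void))
{
	int i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 0x1)
			handlers[i]();	/* one handler per set status bit */
}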
Example #7
void xics_migrate_irqs_away(void)
{
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;
	struct irq_desc *desc;

	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == xics_default_server)
		xics_update_irq_servers();

	/* Reject any interrupt that was queued to us */
	icp_ops->set_priority(0);

	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(xics_default_distrib_server, 0);

	/* Allow IPIs again */
	icp_ops->set_priority(DEFAULT_PRIORITY);

	for_each_irq_desc(virq, desc) {
		struct irq_chip *chip;
		long server;
		unsigned long flags;
		struct ics *ics;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		/* We only need to migrate enabled IRQs */
		if (!desc->action)
			continue;
		if (desc->irq_data.domain != xics_host)
			continue;
		irq = desc->irq_data.hwirq;
		/* We need to keep IPIs and spurious vectors */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		chip = irq_desc_get_chip(desc);
		if (!chip || !chip->irq_set_affinity)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* Locate the interrupt server */
		server = -1;
		ics = irq_get_chip_data(virq);
		if (ics)
			server = ics->get_server(ics, irq);
		if (server < 0) {
			printk(KERN_ERR "%s: Can't find server for irq %d\n",
			       __func__, irq);
			goto unlock;
		}

		if (server != hw_cpu)
			goto unlock;

		/* This is expected during cpu offline */
		if (cpu_online(cpu))
			pr_warning("IRQ %u affinity broken off cpu %u\n",
				   virq, cpu);

		/* Reset affinity to all cpus */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		irq_set_affinity(virq, cpu_all_mask);
		continue;
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int mmc_select_powerclass(struct mmc_card *card,
		unsigned int bus_width, u8 *ext_csd)
{
	int err = 0;
	unsigned int pwrclass_val;
	unsigned int index = 0;
	struct mmc_host *host;

	BUG_ON(!card);

	host = card->host;
	BUG_ON(!host);

	if (ext_csd == NULL)
		return 0;

	/* Power class selection is supported for versions >= 4.0 */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == EXT_CSD_BUS_WIDTH_1)
		return 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_195;
		else if	(host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_195 :
				EXT_CSD_PWR_CL_DDR_52_195;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_360;
		else if	(host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_360 :
				EXT_CSD_PWR_CL_DDR_52_360;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_360;
		break;
	default:
		pr_warning("%s: Voltage range not supported "
			   "for power class.\n", mmc_hostname(host));
		return -EINVAL;
	}

	pwrclass_val = ext_csd[index];

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default, switch to it */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}
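
/*
 * Each EXT_CSD power-class byte packs two classes: bits 7:4 for the 8-bit
 * bus and bits 3:0 for the 4-bit bus, matching the masks used above. A
 * worked example with a made-up register value:
 */
static void demo_pwrclass_nibbles(void)
{
	unsigned char raw = 0x42;			/* hypothetical ext_csd[index] */
	unsigned char cl_8bit = (raw & 0xF0) >> 4;	/* -> 4 */
	unsigned char cl_4bit = raw & 0x0F;		/* -> 2 */

	(void)cl_8bit;
	(void)cl_4bit;
}
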
/*
 * mdss_dsi_cmds_rx() - dcs read from panel
 * @ctrl: dsi controller
 * @cmds: read command descriptor
 * @rlen: number of bytes to read back
 *
 * The controller has 4 registers that can hold 16 bytes of rx data.
 * dcs packet: 4 bytes header + payload + 2 bytes crc.
 * 2 padding bytes are added to the payload so its length is a multiple of 4.
 * 1st read: 4 bytes header + 8 bytes payload + 2 padding + 2 crc
 * 2nd read: 12 bytes payload + 2 padding + 2 crc
 * 3rd read: 12 bytes payload + 2 padding + 2 crc
 */
int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
			struct dsi_cmd_desc *cmds, int rlen)
{
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	int i;
	struct dsi_buf *tp, *rp;
	char cmd;
	u32 dsi_ctrl, data;
	int video_mode;
	u32 left_dsi_ctrl = 0;
	bool left_ctrl_restore = false;

	pr_debug("%s: ++\n", __func__);

	if (ctrl->shared_pdata.broadcast_enable) {
		if (ctrl->ndx == DSI_CTRL_0) {
			pr_debug("%s: Broadcast mode. 1st ctrl\n",
				 __func__);
			return 0;
		}
	}

	if (ctrl->shared_pdata.broadcast_enable) {
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			left_dsi_ctrl = MIPI_INP(left_ctrl_pdata->ctrl_base
								+ 0x0004);
			video_mode = left_dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
			if (video_mode) {
				data = left_dsi_ctrl | 0x04; /* CMD_MODE_EN */
				MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
						data);
				left_ctrl_restore = true;
			}
		}
	}

	/*
	 * turn on cmd mode for video mode; do not send more than one
	 * pixel line of cmds, since they are only transmitted during
	 * the BLLP.
	 */
	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
	if (video_mode) {
		data = dsi_ctrl | 0x04; /* CMD_MODE_EN */
		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, data);
	}

	if (rlen == 0) {
		short_response = 1;
		rx_byte = 4;
		pkt_size = 0;
	} else {
		short_response = 0;
		data_byte = 8;	/* first read */
		/*
		 * add 2 extra padding bytes so the overall packet size
		 * is a multiple of 4. This also makes sure the 4-byte
		 * dcs header lands within a 32-bit register after the
		 * shift in.
		 */
		pkt_size = data_byte + 2;
		rx_byte = data_byte + 8; /* 4 header + 2 crc  + 2 padding*/
	}


	tp = &ctrl->tx_buf;
	rp = &ctrl->rx_buf;

	end = 0;
	mdss_dsi_buf_init(rp);
	while (!end) {
		pr_debug("%s:  rlen=%d pkt_size=%d rx_byte=%d\n",
				__func__, rlen, pkt_size, rx_byte);
		if (!short_response) {
			max_pktsize[0] = pkt_size;
			mdss_dsi_buf_init(tp);
			ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
			if (!ret) {
				pr_err("%s: failed to add max_pkt_size\n",
					__func__);
				rp->len = 0;
				goto end;
			}

			mdss_dsi_wait4video_eng_busy(ctrl);

			mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
			ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
			if (IS_ERR_VALUE(ret)) {
				mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
				pr_err("%s: failed to tx max_pkt_size\n",
					__func__);
				rp->len = 0;
				goto end;
			}
			pr_debug("%s: max_pkt_size=%d sent\n",
						__func__, pkt_size);
		}

		mdss_dsi_buf_init(tp);
		ret = mdss_dsi_cmd_dma_add(tp, cmds);
		if (!ret) {
			pr_err("%s: failed to add cmd = 0x%x\n",
				__func__,  cmds->payload[0]);
			rp->len = 0;
			goto end;
		}

		mdss_dsi_wait4video_eng_busy(ctrl);	/* video mode only */
		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
		/* transmit read command to client */
		ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
		if (IS_ERR_VALUE(ret)) {
			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
			pr_err("%s: failed to tx cmd = 0x%x\n",
				__func__,  cmds->payload[0]);
			rp->len = 0;
			goto end;
		}
		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = mdss_dsi_cmd_dma_rx(ctrl, rp, rx_byte);

		if (short_response)
			break;

		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		dlen -= 2; /* 2 padding bytes */
		dlen -= 2; /* 2 crc */
		dlen -= diff;
		rp->data += dlen;	/* next start position */
		rp->len += dlen;
		data_byte = 12;	/* NOT first read */
		pkt_size += data_byte;
		pr_debug("%s: rp data=%x len=%d dlen=%d diff=%d\n",
			__func__, (int)rp->data, rp->len, dlen, diff);
	}

	rp->data = rp->start;	/* move back to start position */
	cmd = rp->data[0];
	switch (cmd) {
	case DTYPE_ACK_ERR_RESP:
		pr_debug("%s: rx ACK_ERR_PACKAGE\n", __func__);
		rp->len = 0;
		break;
	case DTYPE_GEN_READ1_RESP:
	case DTYPE_DCS_READ1_RESP:
		mdss_dsi_short_read1_resp(rp);
		break;
	case DTYPE_GEN_READ2_RESP:
	case DTYPE_DCS_READ2_RESP:
		mdss_dsi_short_read2_resp(rp);
		break;
	case DTYPE_GEN_LREAD_RESP:
	case DTYPE_DCS_LREAD_RESP:
		mdss_dsi_long_read_resp(rp);
		break;
	default:
		pr_warning("%s: invalid response cmd: len=%d dlen=%d diff=%d\n",
				__func__, rp->len, dlen, diff);
		for (i = 0; i < rp->len; i++)
			pr_info(" rp[%d]=%x\n", i, rp->data[i]);
		rp->len = 0;
	}
end:
	if (left_ctrl_restore)
		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
					left_dsi_ctrl); /*restore */
	if (video_mode)
		MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
					dsi_ctrl); /* restore */
	pr_debug("%s: --\n", __func__);

	return rp->len;
}
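
/*
 * A sketch of the read-chunking arithmetic from the header comment above:
 * the first pass returns 8 payload bytes and every later pass 12, so the
 * number of DMA reads needed for rlen payload bytes is:
 */
static int demo_dsi_read_passes(int rlen)
{
	int passes = 1;

	if (rlen > 8)				/* first read covers 8 bytes */
		passes += (rlen - 8 + 11) / 12;	/* 12 bytes per later read */
	return passes;
}
/* e.g. rlen=8 -> 1 pass, rlen=20 -> 2 passes, rlen=21 -> 3 passes. */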
Example #10
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 6) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (mmc_card_support_bkops(card)) {
				if (!card->ext_csd.bkops_en &&
					card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
					err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
						EXT_CSD_BKOPS_EN, 1, 0);
					if (err) {
						pr_warning("%s: Enabling BKOPS failed\n",
							mmc_hostname(card->host));
					} else {
						card->ext_csd.bkops_en = 1;
						pr_warning("%s: BKOPS enabled\n",
							mmc_hostname(card->host));
					}
				} else {
					if (card->ext_csd.bkops_en) {
						pr_warning("%s: BKOPS already enabled\n",
						mmc_hostname(card->host));
					} else {
						pr_warning("%s: BKOPS not enabled\n",
						mmc_hostname(card->host));
					}
				}
			} else {
				if (card->ext_csd.bkops_en) {
					card->ext_csd.bkops_en = 0;
					pr_warning("%s: BKOPS disabled\n", mmc_hostname(card->host));
				} else
					pr_warning("%s: BKOPS not support\n", mmc_hostname(card->host));
			}
		} else {
			pr_warning("%s: BKOPS not support\n", mmc_hostname(card->host));
		}
		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	}

	if (card->cid.manfid == 0x45) {
		/* vendor-specific: ext_csd[73..78] carries a firmware
		 * revision string on these cards */
		char buf[7] = {0};
		sprintf(buf, "%c%c%c%c%c%c", ext_csd[73], ext_csd[74],
			ext_csd[75], ext_csd[76], ext_csd[77], ext_csd[78]);
		strncpy(card->ext_csd.fwrev, buf, strlen(buf));
	}

	if (mmc_card_mmc(card)) {
		char *buf;
		int i, j;
		ssize_t n = 0;
		pr_info("%s: cid %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_cid[0], card->raw_cid[1],
			card->raw_cid[2], card->raw_cid[3]);
		pr_info("%s: csd %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_csd[0], card->raw_csd[1],
			card->raw_csd[2], card->raw_csd[3]);

		buf = kmalloc(512, GFP_KERNEL);
		if (buf) {
			for (i = 0; i < 32; i++) {
				for (j = 511 - (16 * i); j >= 496 - (16 * i); j--)
					n += sprintf(buf + n, "%02x", ext_csd[j]);
				n += sprintf(buf + n, "\n");
				pr_info("%s: ext_csd %s", mmc_hostname(card->host), buf);
				n = 0;
			}
		}
		kfree(buf);
	}

out:
	return err;
}
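
/*
 * EXT_CSD multi-byte fields are little-endian byte arrays; the sector
 * count above is assembled exactly like this standalone helper (the
 * sample bytes are made up):
 */
static unsigned int demo_le32_from_bytes(const unsigned char *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (unsigned int)p[3] << 24;
}
/* p = {0x00, 0x00, 0x40, 0x01} yields 0x01400000 sectors (~10 GiB). */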
Example #11
static void __init qnap_ts209_init(void)
{
	/*
	 * Setup basic Orion functions. Need to be called early.
	 */
	orion5x_init();

	/*
	 * Setup flash mapping
	 */
	orion5x_setup_dev_boot_win(QNAP_TS209_NOR_BOOT_BASE,
			    QNAP_TS209_NOR_BOOT_SIZE);

	/*
	 * Open a special address decode windows for the PCIe WA.
	 */
	orion5x_setup_pcie_wa_win(ORION5X_PCIE_WA_PHYS_BASE,
				ORION5X_PCIE_WA_SIZE);

	/*
	 * Setup Multiplexing Pins --
	 * MPP[0] Reserved
	 * MPP[1] USB copy button (0 active)
	 * MPP[2] Load defaults button (0 active)
	 * MPP[3] GPIO RTC
	 * MPP[4-5] Reserved
	 * MPP[6] PCI Int A
	 * MPP[7] PCI Int B
	 * MPP[8-11] Reserved
	 * MPP[12] SATA 0 presence
	 * MPP[13] SATA 1 presence
	 * MPP[14] SATA 0 active
	 * MPP[15] SATA 1 active
	 * MPP[16] UART1 RXD
	 * MPP[17] UART1 TXD
	 * MPP[18] SW_RST (0 active)
	 * MPP[19] Reserved
	 * MPP[20] PCI clock 0
	 * MPP[21] PCI clock 1
	 * MPP[22] USB 0 over current
	 * MPP[23-25] Reserved
	 */
	orion5x_write(MPP_0_7_CTRL, 0x3);
	orion5x_write(MPP_8_15_CTRL, 0x55550000);
	orion5x_write(MPP_16_19_CTRL, 0x5500);
	orion5x_gpio_set_valid_pins(0x3cc0fff);

	/* register ts209 specific power-off method */
	pm_power_off = qnap_ts209_power_off;

	platform_add_devices(qnap_ts209_devices,
				ARRAY_SIZE(qnap_ts209_devices));

	/* Get RTC IRQ and register the chip */
	if (gpio_request(TS209_RTC_GPIO, "rtc") == 0) {
		if (gpio_direction_input(TS209_RTC_GPIO) == 0)
			qnap_ts209_i2c_rtc.irq = gpio_to_irq(TS209_RTC_GPIO);
		else
			gpio_free(TS209_RTC_GPIO);
	}
	if (qnap_ts209_i2c_rtc.irq == 0)
		pr_warning("qnap_ts209_init: failed to get RTC IRQ\n");
	i2c_register_board_info(0, &qnap_ts209_i2c_rtc, 1);

	ts209_find_mac_addr();
	orion5x_eth_init(&qnap_ts209_eth_data);

	orion5x_sata_init(&qnap_ts209_sata_data);
}
Example #12
static void hsu_global_init(void)
{
	struct hsu_port *hsu;
	struct uart_hsu_port *uport;
	struct hsu_dma_chan *dchan;
	int i, ret;

	hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
	if (!hsu)
		return;

	/* Get basic io resource and map it */
	hsu->paddr = 0xffa28000;
	hsu->iolen = 0x1000;

	if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
		pr_warning("HSU: error in request mem region\n");

	hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
	if (!hsu->reg) {
		pr_err("HSU: error in ioremap\n");
		ret = -ENOMEM;
		goto err_free_region;
	}

	/* Initialise the 3 UART ports */
	uport = hsu->port;
	for (i = 0; i < 3; i++) {
		uport->port.type = PORT_MFD;
		uport->port.iotype = UPIO_MEM;
		uport->port.mapbase = (resource_size_t)hsu->paddr
					+ HSU_PORT_REG_OFFSET
					+ i * HSU_PORT_REG_LENGTH;
		uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
					+ i * HSU_PORT_REG_LENGTH;

		sprintf(uport->name, "hsu_port%d", i);
		uport->port.fifosize = 64;
		uport->port.ops = &serial_hsu_pops;
		uport->port.line = i;
		uport->port.flags = UPF_IOREMAP;
		/* set the scalable max supported rate to 2764800 bps */
		uport->port.uartclk = 115200 * 24 * 16;

		uport->running = 0;
		uport->txc = &hsu->chans[i * 2];
		uport->rxc = &hsu->chans[i * 2 + 1];

		serial_hsu_ports[i] = uport;
		uport->index = i;

		if (hsu_dma_enable & (1<<i))
			uport->use_dma = 1;
		else
			uport->use_dma = 0;

		uport++;
	}

	/* Initialise 6 dma channels */
	dchan = hsu->chans;
	for (i = 0; i < 6; i++) {
		dchan->id = i;
		dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dchan->uport = &hsu->port[i/2];
		dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
				i * HSU_DMA_CHANS_REG_LENGTH;

		dchan++;
	}

	phsu = hsu;
	hsu_debugfs_init(hsu);
	return;

err_free_region:
	release_mem_region(hsu->paddr, hsu->iolen);
	kfree(hsu);
	return;
}
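
/*
 * uartclk above is 16x the highest supported rate (115200 * 24 = 2764800
 * bps), which fits the usual 8250-style divisor formula sketched here:
 */
static unsigned int demo_uart_divisor(unsigned int uartclk, unsigned int baud)
{
	return uartclk / (16 * baud);	/* 44236800 / (16 * 115200) = 24 */
}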
Example #13
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START)
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pfn = 0;  /* code starts at PA 0 */
		pte = alloc_pte();
		for (pte_ofs = 0; address < (unsigned long)_einittext;
		     pfn++, pte_ofs++, address += PAGE_SIZE) {
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		*(pte_t *)pmd = pteval;
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
Example #14
0
/* called without aufs lock */
int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
{
	int err, n, token;
	aufs_bindex_t bindex;
	unsigned char skipped;
	struct dentry *root;
	struct au_opt *opt, *opt_tail;
	char *opt_str;
	/* reduce the stack space */
	union {
		struct au_opt_xino_itrunc *xino_itrunc;
		struct au_opt_wbr_create *create;
	} u;
	struct {
		substring_t args[MAX_OPT_ARGS];
	} *a;

	err = -ENOMEM;
	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (unlikely(!a))
		goto out;

	root = sb->s_root;
	err = 0;
	bindex = 0;
	opt = opts->opt;
	opt_tail = opt + opts->max_opt - 1;
	opt->type = Opt_tail;
	while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
		err = -EINVAL;
		skipped = 0;
		token = match_token(opt_str, options, a->args);
		switch (token) {
		case Opt_br:
			err = 0;
			while (!err && (opt_str = strsep(&a->args[0].from, ":"))
			       && *opt_str) {
				err = opt_add(opt, opt_str, opts->sb_flags,
					      bindex++);
				if (unlikely(!err && ++opt > opt_tail)) {
					err = -E2BIG;
					break;
				}
				opt->type = Opt_tail;
				skipped = 1;
			}
			break;
		case Opt_add:
			if (unlikely(match_int(&a->args[0], &n))) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			bindex = n;
			err = opt_add(opt, a->args[1].from, opts->sb_flags,
				      bindex);
			if (!err)
				opt->type = token;
			break;
		case Opt_append:
			err = opt_add(opt, a->args[0].from, opts->sb_flags,
				      /*dummy bindex*/1);
			if (!err)
				opt->type = token;
			break;
		case Opt_prepend:
			err = opt_add(opt, a->args[0].from, opts->sb_flags,
				      /*bindex*/0);
			if (!err)
				opt->type = token;
			break;
		case Opt_del:
			err = au_opts_parse_del(&opt->del, a->args);
			if (!err)
				opt->type = token;
			break;
#if 0 /* reserved for future use */
		case Opt_idel:
			del->pathname = "(indexed)";
			if (unlikely(match_int(&args[0], &n))) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			err = au_opts_parse_idel(sb, n, &opt->del, a->args);
			if (!err)
				opt->type = token;
			break;
#endif
		case Opt_mod:
			err = au_opts_parse_mod(&opt->mod, a->args);
			if (!err)
				opt->type = token;
			break;
#ifdef IMOD /* reserved for future use */
		case Opt_imod:
			u.mod->path = "(indexed)";
			if (unlikely(match_int(&a->args[0], &n))) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
			if (!err)
				opt->type = token;
			break;
#endif
		case Opt_xino:
			err = au_opts_parse_xino(sb, &opt->xino, a->args);
			if (!err)
				opt->type = token;
			break;

		case Opt_trunc_xino_path:
			err = au_opts_parse_xino_itrunc_path
				(sb, &opt->xino_itrunc, a->args);
			if (!err)
				opt->type = token;
			break;

		case Opt_itrunc_xino:
			u.xino_itrunc = &opt->xino_itrunc;
			if (unlikely(match_int(&a->args[0], &n))) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			u.xino_itrunc->bindex = n;
			aufs_read_lock(root, AuLock_FLUSH);
			if (n < 0 || au_sbend(sb) < n) {
				pr_err("out of bounds, %d\n", n);
				aufs_read_unlock(root, !AuLock_IR);
				break;
			}
			aufs_read_unlock(root, !AuLock_IR);
			err = 0;
			opt->type = token;
			break;

		case Opt_dirwh:
			if (unlikely(match_int(&a->args[0], &opt->dirwh)))
				break;
			err = 0;
			opt->type = token;
			break;

		case Opt_rdcache:
			if (unlikely(match_int(&a->args[0], &n))) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			if (unlikely(n > AUFS_RDCACHE_MAX)) {
				pr_err("rdcache must be smaller than %d\n",
				       AUFS_RDCACHE_MAX);
				break;
			}
			opt->rdcache = n;
			err = 0;
			opt->type = token;
			break;
		case Opt_rdblk:
			if (unlikely(match_int(&a->args[0], &n)
				     || n < 0
				     || n > KMALLOC_MAX_SIZE)) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			if (unlikely(n && n < NAME_MAX)) {
				pr_err("rdblk must be larger than %d\n",
				       NAME_MAX);
				break;
			}
			opt->rdblk = n;
			err = 0;
			opt->type = token;
			break;
		case Opt_rdhash:
			if (unlikely(match_int(&a->args[0], &n)
				     || n < 0
				     || n * sizeof(struct hlist_head)
				     > KMALLOC_MAX_SIZE)) {
				pr_err("bad integer in %s\n", opt_str);
				break;
			}
			opt->rdhash = n;
			err = 0;
			opt->type = token;
			break;

		case Opt_trunc_xino:
		case Opt_notrunc_xino:
		case Opt_noxino:
		case Opt_trunc_xib:
		case Opt_notrunc_xib:
		case Opt_shwh:
		case Opt_noshwh:
		case Opt_plink:
		case Opt_noplink:
		case Opt_list_plink:
		case Opt_dio:
		case Opt_nodio:
		case Opt_diropq_a:
		case Opt_diropq_w:
		case Opt_warn_perm:
		case Opt_nowarn_perm:
		case Opt_refrof:
		case Opt_norefrof:
		case Opt_verbose:
		case Opt_noverbose:
		case Opt_sum:
		case Opt_nosum:
		case Opt_wsum:
		case Opt_rdblk_def:
		case Opt_rdhash_def:
			err = 0;
			opt->type = token;
			break;

		case Opt_udba:
			opt->udba = udba_val(a->args[0].from);
			if (opt->udba >= 0) {
				err = 0;
				opt->type = token;
			} else
				pr_err("wrong value, %s\n", opt_str);
			break;

		case Opt_wbr_create:
			u.create = &opt->wbr_create;
			u.create->wbr_create
				= au_wbr_create_val(a->args[0].from, u.create);
			if (u.create->wbr_create >= 0) {
				err = 0;
				opt->type = token;
			} else
				pr_err("wrong value, %s\n", opt_str);
			break;
		case Opt_wbr_copyup:
			opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
			if (opt->wbr_copyup >= 0) {
				err = 0;
				opt->type = token;
			} else
				pr_err("wrong value, %s\n", opt_str);
			break;

		case Opt_ignore:
			pr_warning("ignored %s\n", opt_str);
			/*FALLTHROUGH*/
		case Opt_ignore_silent:
			skipped = 1;
			err = 0;
			break;
		case Opt_err:
			pr_err("unknown option %s\n", opt_str);
			break;
		}

		if (!err && !skipped) {
			if (unlikely(++opt > opt_tail)) {
				err = -E2BIG;
				opt--;
				opt->type = Opt_tail;
				break;
			}
			opt->type = Opt_tail;
		}
	}

	kfree(a);
	dump_opts(opts);
	if (unlikely(err))
		au_opts_free(opts);

out:
	return err;
}
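
/*
 * A userspace sketch of the outer tokenizing loop above, assuming glibc's
 * strsep() and a hypothetical handle() callback; the comma-separated
 * option string is consumed destructively, exactly as in au_opts_parse().
 */
#include <string.h>

static int demo_parse_opts(char *str, int (*handle)(const char *))
{
	char *opt_str;
	int err = 0;

	while (!err && (opt_str = strsep(&str, ",")) && *opt_str)
		err = handle(opt_str);
	return err;
}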
Example #15
File: vario.c Project: cran/gstat
void check_variography(const VARIOGRAM **v, int n_vars)
/*
 * check for intrinsic correlation, linear model of coregionalisation,
 * or else (with warning) Cauchy-Schwarz
 */
{
	int i, j, k, ic = 0, lmc, posdef = 1;
	MAT **a = NULL;
	double b;
	char *reason = NULL;

	if (n_vars <= 1)
		return;
/*
 * find out if lmc (linear model of coregionalization) holds:
 * all models must have equal base models (sequence and range)
 */
	for (i = 1, lmc = 1; lmc && i < get_n_vgms(); i++) {
		if (v[0]->n_models != v[i]->n_models) {
			reason = "number of models differ";
			lmc = 0;
		}
		for (k = 0; lmc && k < v[0]->n_models; k++) {
			if (v[0]->part[k].model != v[i]->part[k].model) {
				reason = "model types differ";
				lmc = 0;
			}
			if (v[0]->part[k].range[0] != v[i]->part[k].range[0]) {
				reason = "ranges differ";
				lmc = 0;
			}
		}
		for (k = 0; lmc && k < v[0]->n_models; k++)
			if (v[0]->part[k].tm_range != NULL) {
				if (v[i]->part[k].tm_range == NULL) {
					reason = "anisotropy for part of models";
					lmc = 0;
				} else if (
		v[0]->part[k].tm_range->ratio[0] != v[i]->part[k].tm_range->ratio[0] ||
		v[0]->part[k].tm_range->ratio[1] != v[i]->part[k].tm_range->ratio[1] ||
		v[0]->part[k].tm_range->angle[0] != v[i]->part[k].tm_range->angle[0] ||
		v[0]->part[k].tm_range->angle[1] != v[i]->part[k].tm_range->angle[1] ||
		v[0]->part[k].tm_range->angle[2] != v[i]->part[k].tm_range->angle[2]
				) {
					reason = "anisotropy parameters are not equal";
					lmc = 0;
				}
			} else if (v[i]->part[k].tm_range != NULL) {
				reason = "anisotropy for part of models";
				lmc = 0;
			}
	}
	if (lmc) {
/*
 * check for ic:
 */
		a = (MAT **) emalloc(v[0]->n_models * sizeof(MAT *));
		for (k = 0; k < v[0]->n_models; k++)
			a[k] = m_get(n_vars, n_vars);
		for (i = 0; i < n_vars; i++) {
			for (j = 0; j < n_vars; j++) { /* for all variogram triplets: */
				for (k = 0; k < v[0]->n_models; k++)
					ME(a[k], i, j) = v[LTI(i,j)]->part[k].sill;
			}
		}
		/* for ic: a's must be scaled versions of each other: */
		for (k = 1, ic = 1; ic && k < v[0]->n_models; k++) {
			b = ME(a[0], 0, 0)/ME(a[k], 0, 0);
			for (i = 0; ic && i < n_vars; i++)
				for (j = 0; ic && j < n_vars; j++)
					if (fabs(ME(a[0], i, j) / ME(a[k], i, j) - b) > EPSILON)
						ic = 0;	
		}
		/* check posdef matrices */
		for (i = 0, lmc = 1, posdef = 1; i < v[0]->n_models; i++) {
			posdef = is_posdef(a[i]);
			if (posdef == 0) {
				reason = "coefficient matrix not positive definite";
				if (DEBUG_COV) {
					printlog("non-positive definite coefficient matrix %d:\n", 
						i);
					m_logoutput(a[i]);
				}
				ic = lmc = 0;
			}
			if (! posdef)
				printlog(
				"non-positive definite coefficient matrix in structure %d", 
				i+1);
		}
		for (k = 0; k < v[0]->n_models; k++)
			m_free(a[k]);
		efree(a);

		if (ic) {
			printlog("Intrinsic Correlation found. Good.\n");
			return;
		} else if (lmc) {
			printlog("Linear Model of Coregionalization found. Good.\n");
			return;
		}
	}
/*
 * lmc does not hold: check on Cauchy-Schwarz
 */
	pr_warning("No Intrinsic Correlation or Linear Model of "
			"Coregionalization found\nReason: %s",
			reason ? reason : "unknown");
	if (gl_nocheck == 0) {
		pr_warning("[add `set = list(nocheck = 1)' to the gstat() or krige() call to ignore the following error]\n");
		ErrMsg(ER_IMPOSVAL, "variograms do not satisfy a legal model");
	}
	printlog("Now checking for Cauchy-Schwarz inequalities:\n");
	for (i = 0; i < n_vars; i++)
		for (j = 0; j < i; j++)
			if (is_valid_cs(v[LTI(i,i)], v[LTI(j,j)], v[LTI(i,j)])) {
				printlog("variogram(%s,%s) passed Cauchy-Schwarz\n",
					name_identifier(j), name_identifier(i));
			} else
				pr_warning("Cauchy-Schwarz inequality found for variogram(%s,%s)",
						name_identifier(j), name_identifier(i));
	return;
}
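
/*
 * The intrinsic-correlation test above asks whether every coefficient
 * matrix is a scalar multiple of the first one. A plain-C sketch of that
 * test over n x n arrays stored row-major (eps plays the role of EPSILON):
 */
#include <math.h>

static int demo_is_scaled_copy(const double *a0, const double *ak,
		int n, double eps)
{
	int i;
	double b = a0[0] / ak[0];	/* candidate scale factor */

	for (i = 0; i < n * n; i++)
		if (fabs(a0[i] / ak[i] - b) > eps)
			return 0;
	return 1;
}
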
static void ts27010_tty_flush_buffer(struct tty_struct *tty)
{
	pr_warning("ts27010: flush_buffer not implemented on line %d\n",
		tty->index);
}
Example #17
static int mipi_dsi_panel_power(int on)
{
    int flag_on = !!on;
    static int mipi_dsi_power_save_on;
    int rc = 0;

    if (mipi_dsi_power_save_on == flag_on)
        return 0;

    mipi_dsi_power_save_on = flag_on;

    if (reg_8901_l2 == NULL) {
        reg_8901_l2 = regulator_get(NULL, "8901_l2");
        if (IS_ERR(reg_8901_l2)) {
            reg_8901_l2 = NULL;
        }
    }
    if (reg_8901_mvs == NULL) {
        reg_8901_mvs = regulator_get(NULL, "8901_mvs0");
        if (IS_ERR(reg_8901_mvs)) {
            reg_8901_mvs = NULL;
        }
    }
    if (reg_8901_l3 == NULL) {
        reg_8901_l3 = regulator_get(NULL, "8901_l3");
        if (IS_ERR(reg_8901_l3)) {
            reg_8901_l3 = NULL;
        }
    }

    if (on) {
        rc = regulator_set_voltage(reg_8901_l2, 2800000, 2800000);
        if (!rc)
            rc = regulator_enable(reg_8901_l2);
        if (rc) {
            pr_err("'%s' regulator enable failed, rc=%d\n",
                   "8901_l2", rc);
            return rc;
        }
        udelay(100); // 100us

        if (!rc)
            rc = regulator_enable(reg_8901_mvs); // +1V8_LCD_IO
        if (rc) {
            pr_err("'%s' regulator enable failed, rc=%d\n",
                   "8901_mvs", rc);
            return rc;
        }
        udelay(500); // 500us

        rc = regulator_set_voltage(reg_8901_l3, 2800000, 2800000); // +3V0_LCD_VCI
        if (!rc)
            rc = regulator_enable(reg_8901_l3);
        if (rc) {
            pr_err("'%s' regulator enable failed, rc=%d\n",
                   "8901_l3", rc);
            return rc;
        }
        udelay(100); // 100us
    }
    else {

        gpio_set_value(MDP_VSYNC_GPIO,0);
        gpio_tlmm_config(GPIO_CFG(MDP_VSYNC_GPIO, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
        rc = regulator_disable(reg_8901_l3);
        if (rc)
            pr_warning("'%s' regulator disable failed, rc=%d\n",
                       "8901_l2", rc);
        udelay(100); // 100us
        rc = regulator_disable(reg_8901_mvs);
        if (rc)
            pr_warning("'%s' regulator disable failed, rc=%d\n",
                       "8901_mvs", rc);
        udelay(100); // 100us
        rc = regulator_disable(reg_8901_l2);
        if (rc)
            pr_warning("'%s' regulator disable failed, rc=%d\n",
                       "8901_l3", rc);
        udelay(100); // 100us
        pr_info("%s(off): success\n", __func__);

    }
    return 0;
}
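
The regulator_get() calls above silently turn errors into NULL handles, which the power paths then use unchecked. A more defensive lookup, as a sketch (get_reg() is hypothetical; the supply names are the ones used above):

static struct regulator *get_reg(const char *supply)
{
    struct regulator *reg = regulator_get(NULL, supply);

    if (IS_ERR(reg)) {
        pr_err("regulator_get(%s) failed: %ld\n", supply, PTR_ERR(reg));
        return NULL;
    }
    return reg;
}

A caller still has to verify the result is non-NULL before handing it to regulator_set_voltage() or regulator_enable().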
static void ts27010_tty_unthrottle(struct tty_struct *tty)
{
	pr_warning("ts27010: unthrottle not implemented on line %d\n",
		tty->index);
}
/*
 * This procedure performs packet filtering and
 * updates BIBs and STs.
 */
static bool nat64_filtering_and_updating(u_int8_t l3protocol, u_int8_t l4protocol, 
        struct sk_buff *skb, struct nf_conntrack_tuple * inner)
{
    struct nat64_bib_entry *bib;
    struct nat64_st_entry *session;
    struct tcphdr *tcph = tcp_hdr(skb);
    //struct icmphdr *icmph = icmp_hdr(skb);
    bool res;
    int i;
    res = false;

    if (l3protocol == NFPROTO_IPV4) {
        pr_debug("NAT64: FNU - IPV4");
        /*
         * Query the STs for any records
         * If there's no active session for the specified 
         * connection, the packet should be dropped
         */
        switch (l4protocol) {
            case IPPROTO_TCP:
                //Query TCP ST
                //pr_debug("NAT64: TCP protocol not currently supported.");

                bib = nat64_bib_ipv4_lookup(inner->dst.u3.in.s_addr,
                        inner->dst.u.tcp.port, 
                        IPPROTO_TCP);
                if (!bib) {
                    pr_warning("NAT64: IPv4 - BIB is missing.");
                    return res;
                }

                session = nat64_session_ipv4_lookup(bib, 
                        inner->src.u3.in.s_addr, 
                        inner->src.u.tcp.port);				
                if (!session) {
                    pr_warning("NAT64: IPv4 - session entry is "
                            "missing.");
                    return res;
                }

                pr_debug("NAT64: TCP protocol for IPv4 "
                        "finished properly.");
                res = true;
                break;
            case IPPROTO_UDP:
                //Query UDP BIB and ST

                bib = nat64_bib_ipv4_lookup(inner->dst.u3.in.s_addr, 
                        (inner->dst.u.udp.port),
                        IPPROTO_UDP);
                if (!bib) {
                    pr_warning("NAT64: IPv4 - BIB is missing.");
                    return res;
                }

                session = nat64_session_ipv4_lookup(bib, 
                        inner->src.u3.in.s_addr, 
                        inner->src.u.udp.port);				
                if (!session) {
                    pr_warning("NAT64: IPv4 - session entry is "
                            "missing.");
                    return res;
                }

                pr_debug("NAT64: UDP protocol for IPv4 "
                        "finished properly.");
                res = true;
                break;
            case IPPROTO_ICMP:
                //Query ICMP ST
                bib = nat64_bib_ipv4_lookup(inner->dst.u3.in.s_addr, 
                        (inner->src.u.icmp.id),
                        IPPROTO_ICMPV6);

                if (!bib) {
                    pr_debug("No se pudo con T':%pI4.", &inner->dst.u3.in.s_addr);
                    pr_debug("Inner: %hu", ntohs(inner->src.u.icmp.id));
                    pr_warning("NAT64: IPv4 - BIB is missing.");
                    return res;
                }

                session = nat64_session_ipv4_lookup(bib, 
                        inner->src.u3.in.s_addr, 
                        inner->src.u.icmp.id);				

                if (!session) {
                    pr_warning("NAT64: IPv4 - session entry is "
                            "missing.");
                    return res;
                }
                res = true;
                break;
            case IPPROTO_ICMPV6:
                //Query ICMPV6 ST
                pr_debug("NAT64: ICMPv6 protocol not "
                        "currently supported.");
                break;
            default:
                //Drop packet
                pr_debug("NAT64: layer 4 protocol not "
                        "currently supported.");
                break;
        }
        goto end;
    } else if (l3protocol == NFPROTO_IPV6) {
        pr_debug("NAT64: FNU - IPV6");	
        // FIXME: Return true if it is not H&H. A special return code 
        // will have to be added as a param in the future to handle it.
        res = false;
        nat64_clean_expired_sessions(&expiry_queue, 0);
        for (i = 0; i < NUM_EXPIRY_QUEUES; i++)
        	nat64_clean_expired_sessions(&expiry_base[i].queue, i);

        switch (l4protocol) {
            case IPPROTO_TCP:
                /*
                 * FIXME: Finish TCP session handling
                 */
                pr_debug("NAT64: FNU - TCP");

                bib = nat64_bib_ipv6_lookup(&(inner->src.u3.in6), 
                        inner->src.u.tcp.port, IPPROTO_TCP);
                if (bib) {
                    session = nat64_session_ipv4_lookup(bib, 
                            nat64_extract_ipv4(
                                inner->dst.u3.in6, 
                                //prefix_len), 
                            ipv6_pref_len), 
                            inner->dst.u.tcp.port);
                    if (session) {
                        nat64_tcp6_fsm(session, tcph);
                    } else {
                        pr_debug("No session entry yet; creating one.");
                        session = nat64_session_create_tcp(bib, 
                                &(inner->dst.u3.in6), 
                                nat64_extract_ipv4(
                                    inner->dst.u3.in6, 
                                    //prefix_len), 
                                ipv6_pref_len), 
                                inner->dst.u.tcp.port, 
                                TCP_TRANS);
                    }
                } else if (tcph->syn) {
                    pr_debug("Create a new BIB and Session entry syn.");
                    bib = nat64_bib_session_create_tcp(
                            &(inner->src.u3.in6), 
                            &(inner->dst.u3.in6), 
                            nat64_extract_ipv4(
                                inner->dst.u3.in6, 
                                //prefix_len), 
                            ipv6_pref_len), 
                        inner->src.u.tcp.port, 
                        inner->dst.u.tcp.port, 
                        l4protocol, TCP_TRANS);

                    session = list_entry(bib->sessions.next, struct nat64_st_entry, list);
                    session->state = V6_SYN_RCV;
                }
                res = true;
                break;
            case IPPROTO_UDP:
                pr_debug("NAT64: FNU - UDP");
                /*
                 * Verify if there's any binding for the src 
                 * address by querying the UDP BIB. If there's a
                 * binding, verify if there's a connection to the 
                 * specified destination by querying the UDP ST.
                 * 
                 * In case these records are missing, they 
                 * should be created.
                 */
                bib = nat64_bib_ipv6_lookup(&(inner->src.u3.in6), 
                        inner->src.u.udp.port,
                        IPPROTO_UDP);
                if (bib) {
                    session = nat64_session_ipv4_lookup(bib, 
                            nat64_extract_ipv4(
                                inner->dst.u3.in6, 
                                ipv6_pref_len), 
                            inner->dst.u.udp.port);
                    if (session) {
                        nat64_session_renew(session, UDP_DEFAULT);
                    } else {
                        session = nat64_session_create(bib, 
                                &(inner->dst.u3.in6), 
                                nat64_extract_ipv4(
                                    inner->dst.u3.in6, 
                                    ipv6_pref_len), 
                                inner->dst.u.udp.port, 
                                UDP_DEFAULT);
                    }
                } else {
                    pr_debug("Create a new BIB and Session entry.");
                    bib = nat64_bib_session_create(
                            &(inner->src.u3.in6), 
                            &(inner->dst.u3.in6), 
                            nat64_extract_ipv4(
                                inner->dst.u3.in6, 
                                ipv6_pref_len), 
                            inner->src.u.udp.port, 
                            inner->dst.u.udp.port, 
                            l4protocol, UDP_DEFAULT);
                }
                res = true;
                break;
            case IPPROTO_ICMP:
                //Query ICMP ST
                pr_debug("NAT64: ICMP protocol not currently "
                        "supported.");
                break;
            case IPPROTO_ICMPV6:
                //Query ICMPV6 ST
                bib = nat64_bib_ipv6_lookup(&(inner->src.u3.in6), 
                        inner->src.u.icmp.id, IPPROTO_ICMP);
                if (bib) {
                    session = nat64_session_ipv4_lookup(bib, 
                            nat64_extract_ipv4(
                                inner->dst.u3.in6, 
                                //prefix_len), 
                            ipv6_pref_len), 
                            inner->src.u.icmp.id);
                    if (session) {
                        nat64_session_renew(session, ICMP_DEFAULT);
                    } else {
                        session = nat64_session_create_icmp(bib, 
                                &(inner->dst.u3.in6), 
                                nat64_extract_ipv4(
                                    inner->dst.u3.in6, 
                                    ipv6_pref_len), 
                                inner->src.u.icmp.id, 
                                ICMP_DEFAULT);
                    }
                } else {
                    pr_debug("Create a new BIB and Session entry.");
                    bib = nat64_bib_session_create_icmp(
                            &(inner->src.u3.in6), 
                            &(inner->dst.u3.in6), 
                            nat64_extract_ipv4(
                                inner->dst.u3.in6, 
                                ipv6_pref_len), 
                            inner->src.u.icmp.id, 
                            inner->src.u.icmp.id, 
                            l4protocol, ICMP_DEFAULT);
                }
                res = true;
                /*pr_debug("NAT64: ICMPv6 protocol not currently "*/
                /*"supported.");*/
                break;
            default:
                //Drop packet
                pr_debug("NAT64: layer 4 protocol not currently "
                        "supported.");
                break;
        }
    }

end:
    return res;
}
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	static long 		prev_len;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet */
#if 1 //QCT_SBA
			if (prev_len && (prev_len % in->maxpacket == 0)) {
#else
			if (prev_len && (prev_len % in->maxpacket == 0)) {
#endif
				req->length = 0;
				list_del(&req->list);

				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
					__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;

	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
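
The zero-length-packet branch above encodes a basic USB bulk rule: a transfer whose final packet is exactly maxpacket bytes long must be terminated by an explicit zero-length packet, or the host assumes more data follows. The condition in isolation, as a sketch (names mirror the code above):

/* A ZLP is required only when the previous write filled a whole
 * number of USB packets; a short final packet already terminates
 * the transfer. */
static inline int needs_zlp(long prev_len, unsigned maxpacket)
{
	return prev_len != 0 && (prev_len % maxpacket) == 0;
}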

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		started++;

	}
	return started;
}

/*
 * RX work queue takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
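
The tail of gs_rx_push() decides whether to reschedule itself. Stated on its own, the rule is: requeue only while queued data remains, the TTY is still open, and it is not throttled, since a throttled TTY is expected to restart the queue from its unthrottle hook. A sketch:

/* True when the RX work should be queued again immediately. */
static inline int should_requeue(int queue_nonempty, int tty_open,
		int throttled)
{
	return queue_nonempty && tty_open && !throttled;
}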

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list, &port->write_pool);

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size,
		void (*fn)(struct usb_ep *, struct usb_request *))
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < num; i++) {
		req = gs_alloc_req(ep, size, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
	}
	return 0;
}
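
gs_alloc_requests() is deliberately best-effort: it fails only when nothing at all could be allocated and otherwise accepts a shorter queue. The same pattern in miniature (struct item and alloc_item() are hypothetical):

/* Best-effort preallocation: -ENOMEM only if the list stays empty. */
static int prealloc_items(struct list_head *head, int want)
{
	int i;

	for (i = 0; i < want; i++) {
		struct item *it = alloc_item();	/* hypothetical allocator */

		if (!it)
			return list_empty(head) ? -ENOMEM : 0;
		list_add_tail(&it->list, head);
	}
	return 0;
}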
static __init void da830_evm_usb_init(void)
{
	u32 cfgchip2;
	int ret;

	/*
	 * Set up USB clock/mode in the CFGCHIP2 register.
	 * FYI:  CFGCHIP2 is 0x0000ef00 initially.
	 */
	cfgchip2 = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));

	/* USB2.0 PHY reference clock is 24 MHz */
	cfgchip2 &= ~CFGCHIP2_REFFREQ;
	cfgchip2 |=  CFGCHIP2_REFFREQ_24MHZ;

	/*
	 * Select internal reference clock for USB 2.0 PHY
	 * and use it as a clock source for USB 1.1 PHY
	 * (this is the default setting anyway).
	 */
	cfgchip2 &= ~CFGCHIP2_USB1PHYCLKMUX;
	cfgchip2 |=  CFGCHIP2_USB2PHYCLKMUX;

	/*
	 * We have to override VBUS/ID signals when MUSB is configured into the
	 * host-only mode -- ID pin will float if no cable is connected, so the
	 * controller won't be able to drive VBUS thinking that it's a B-device.
	 * Otherwise, we want to use the OTG mode and enable VBUS comparators.
	 */
	cfgchip2 &= ~CFGCHIP2_OTGMODE;
#ifdef	CONFIG_USB_MUSB_HOST
	cfgchip2 |=  CFGCHIP2_FORCE_HOST;
#else
	cfgchip2 |=  CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN;
#endif

	__raw_writel(cfgchip2, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));

	/* USB_REFCLKIN is not used. */
	ret = davinci_cfg_reg(DA830_USB0_DRVVBUS);
	if (ret) {
		pr_warning("%s: USB 2.0 PinMux setup failed: %d\n",
			   __func__, ret);
	} else {
		/*
		 * TPS2065 switch @ 5V supplies 1 A (sustains 1.5 A),
		 * with the power on to power good time of 3 ms.
		 */
		ret = da8xx_register_usb20(1000, 3);
		if (ret)
			pr_warning("%s: USB 2.0 registration failed: %d\n",
				   __func__, ret);
	}

	ret = davinci_cfg_reg_list(da830_evm_usb11_pins);
	if (ret) {
		pr_warning("%s: USB 1.1 PinMux setup failed: %d\n",
			   __func__, ret);
		return;
	}

	ret = gpio_request(ON_BD_USB_DRV, "ON_BD_USB_DRV");
	if (ret) {
		printk(KERN_ERR "%s: failed to request GPIO for USB 1.1 port "
		       "power control: %d\n", __func__, ret);
		return;
	}
	gpio_direction_output(ON_BD_USB_DRV, 0);

	ret = gpio_request(ON_BD_USB_OVC, "ON_BD_USB_OVC");
	if (ret) {
		printk(KERN_ERR "%s: failed to request GPIO for USB 1.1 port "
		       "over-current indicator: %d\n", __func__, ret);
		return;
	}
	gpio_direction_input(ON_BD_USB_OVC);

	ret = da8xx_register_usb11(&da830_evm_usb11_pdata);
	if (ret)
		pr_warning("%s: USB 1.1 registration failed: %d\n",
			   __func__, ret);
}
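
The CFGCHIP2 programming above is a read-modify-write sequence: read the register, clear the relevant field masks, OR in the new values, write the result back. As a generic helper, a sketch (rmw32() is not part of the DaVinci code):

/* Update one register field: clear its mask, then set the new bits. */
static inline void rmw32(void __iomem *reg, u32 mask, u32 bits)
{
	u32 v = __raw_readl(reg);

	v &= ~mask;
	v |= bits;
	__raw_writel(v, reg);
}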
Example #22
0
static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	int i;
#ifndef NO_NOPCOMMANDS
	int next_nop;
#endif
	struct priv *p = netdev_priv(dev);

	netif_stop_queue(dev);

	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;

	if (len != skb->len)
		memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);

#if (NUM_XMIT_BUFFS == 1)
#ifdef NO_NOPCOMMANDS
	p->xmit_buffs[0]->size = TBD_LAST | len;
	for (i = 0; i < 16; i++) {
		p->scb->cbl_offset = make16(p->xmit_cmds[0]);
		p->scb->cmd = CUC_START;
		p->xmit_cmds[0]->cmd_status = 0;
		elmc_attn586();
		dev->trans_start = jiffies;
		if (!i) {
			dev_kfree_skb(skb);
		}
		WAIT_4_SCB_CMD();
		if ((p->scb->status & CU_ACTIVE)) {	/* test it, because CU sometimes doesn't start immediately */
			break;
		}
		if (p->xmit_cmds[0]->cmd_status) {
			break;
		}
		if (i == 15) {
			pr_warning("%s: Can't start transmit-command.\n", dev->name);
		}
	}
#else
	next_nop = (p->nop_point + 1) & 0x1;
	p->xmit_buffs[0]->size = TBD_LAST | len;

	p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;

	p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
	dev->trans_start = jiffies;
	p->nop_point = next_nop;
	dev_kfree_skb(skb);
#endif
#else
	p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
	if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
		next_nop = 0;
	}
	p->xmit_cmds[p->xmit_count]->cmd_status = 0;
	p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->nop_cmds[next_nop]->cmd_status = 0;
	p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
	dev->trans_start = jiffies;
	p->xmit_count = next_nop;
	if (p->xmit_count != p->xmit_last)
		netif_wake_queue(dev);
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}
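
In the multi-buffer branch, xmit_count walks a ring of NUM_XMIT_BUFFS transmit slots chained through NOP commands. The wraparound step by itself, as a sketch:

/* Advance a ring index, wrapping back to slot 0 at the end. */
static inline int next_xmit_slot(int cur, int nbufs)
{
	return (cur + 1 == nbufs) ? 0 : cur + 1;
}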
Example #23
0
/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage.  Use this call to set up the ports you will be
 * exporting through USB.  Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned			i;
	struct usb_cdc_line_coding	coding;
	int				status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->owner = THIS_MODULE;
	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
				| TTY_DRIVER_RESET_TERMIOS;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	gserial_wq = create_singlethread_workqueue("k_gserial");
	if (!gserial_wq) {
		status = -ENOMEM;
		goto fail;
	}

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		put_tty_driver(gs_tty_driver);
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device	*tty_dev;

		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				__func__, i, PTR_ERR(tty_dev));
	}

	for (i = 0; i < count; i++)
		usb_debugfs_init(ports[i].port, i);

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			count, (count == 1) ? "" : "s");

	return status;
fail:
	while (count--)
		kfree(ports[count].port);
	if (gserial_wq)
		destroy_workqueue(gserial_wq);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
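
Per the kerneldoc above, a gadget driver calls gserial_setup() once at bind time, before any configuration connects a port. A minimal usage sketch (my_bind() and the port count of two are illustrative):

/* Sketch: reserve two ttyGS* ports for this gadget at bind time. */
static int __init my_bind(struct usb_composite_dev *cdev)
{
	int status = gserial_setup(cdev->gadget, 2);

	if (status < 0)
		return status;	/* no TTY ports; abort the bind */
	/* ... add configurations whose functions connect the ports ... */
	return 0;
}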
Example #24
0
static int init586(struct net_device *dev)
{
	void *ptr;
	unsigned long s;
	int i, result = 0;
	struct priv *p = netdev_priv(dev);
	volatile struct configure_cmd_struct *cfg_cmd;
	volatile struct iasetup_cmd_struct *ias_cmd;
	volatile struct tdr_cmd_struct *tdr_cmd;
	volatile struct mcsetup_cmd_struct *mc_cmd;
	struct dev_mc_list *dmi;
	int num_addrs = netdev_mc_count(dev);

	ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));

	cfg_cmd = (struct configure_cmd_struct *) ptr;	/* configure-command */
	cfg_cmd->cmd_status = 0;
	cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
	cfg_cmd->cmd_link = 0xffff;

	cfg_cmd->byte_cnt = 0x0a;	/* number of cfg bytes */
	cfg_cmd->fifo = 0x08;	/* fifo-limit (8=tx:32/rx:64) */
	cfg_cmd->sav_bf = 0x40;	/* hold or discard bad recv frames (bit 7) */
	cfg_cmd->adr_len = 0x2e;	/* addr_len |!src_insert |pre-len |loopback */
	cfg_cmd->priority = 0x00;
	cfg_cmd->ifs = 0x60;
	cfg_cmd->time_low = 0x00;
	cfg_cmd->time_high = 0xf2;
	cfg_cmd->promisc = 0;
	if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
		cfg_cmd->promisc = 1;
	cfg_cmd->carr_coll = 0x00;

	p->scb->cbl_offset = make16(cfg_cmd);

	p->scb->cmd = CUC_START;	/* cmd.-unit start */
	elmc_id_attn586();

	s = jiffies;		/* warning: only active with interrupts on !! */
	while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
		if (time_after(jiffies, s + 30*HZ/100))
			break;
	}

	if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
		pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
		return 1;
	}
	/*
	 * individual address setup
	 */
	ias_cmd = (struct iasetup_cmd_struct *) ptr;

	ias_cmd->cmd_status = 0;
	ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
	ias_cmd->cmd_link = 0xffff;

	memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN);

	p->scb->cbl_offset = make16(ias_cmd);

	p->scb->cmd = CUC_START;	/* cmd.-unit start */
	elmc_id_attn586();

	s = jiffies;
	while (!(ias_cmd->cmd_status & STAT_COMPL)) {
		if (time_after(jiffies, s + 30*HZ/100))
			break;
	}

	if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
		pr_warning("%s (elmc): individual address setup command failed: %04x\n",
			dev->name, ias_cmd->cmd_status);
		return 1;
	}
	/*
	 * TDR, wire check .. e.g. no resistor e.t.c
	 */
	tdr_cmd = (struct tdr_cmd_struct *) ptr;

	tdr_cmd->cmd_status = 0;
	tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
	tdr_cmd->cmd_link = 0xffff;
	tdr_cmd->status = 0;

	p->scb->cbl_offset = make16(tdr_cmd);

	p->scb->cmd = CUC_START;	/* cmd.-unit start */
	elmc_attn586();

	s = jiffies;
	while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
		if (time_after(jiffies, s + 30*HZ/100)) {
			pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
			result = 1;
			break;
		}
	}

	if (!result) {
		DELAY(2);	/* wait for result */
		result = tdr_cmd->status;

		p->scb->cmd = p->scb->status & STAT_MASK;
		elmc_id_attn586();	/* ack the interrupts */

		if (result & TDR_LNK_OK) {
			/* empty */
		} else if (result & TDR_XCVR_PRB) {
			pr_warning("%s: TDR: Transceiver problem!\n", dev->name);
		} else if (result & TDR_ET_OPN) {
			pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
		} else if (result & TDR_ET_SRT) {
			if (result & TDR_TIMEMASK)	/* time == 0 -> strange :-) */
				pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
		} else {
			pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result);
		}
	}
	/*
	 * ack interrupts
	 */
	p->scb->cmd = p->scb->status & STAT_MASK;
	elmc_id_attn586();

	/*
	 * alloc nop/xmit-cmds
	 */
#if (NUM_XMIT_BUFFS == 1)
	for (i = 0; i < 2; i++) {
		p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
		p->nop_cmds[i]->cmd_cmd = CMD_NOP;
		p->nop_cmds[i]->cmd_status = 0;
		p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
		ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
	}
	p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr;	/* transmit cmd/buff 0 */
	ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
#else
	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
		p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
		p->nop_cmds[i]->cmd_cmd = CMD_NOP;
		p->nop_cmds[i]->cmd_status = 0;
		p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
		ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
		p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr;	/*transmit cmd/buff 0 */
		ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
	}
#endif

	ptr = alloc_rfa(dev, (void *) ptr);	/* init receive-frame-area */

	/*
	 * Multicast setup
	 */

	if (num_addrs) {
		/* I don't understand this: do we really need memory after the init? */
		int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
		if (len <= 0) {
			pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name);
		} else {
			if (len < num_addrs) {
				num_addrs = len;
				pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n",
				       dev->name, num_addrs);
			}
			mc_cmd = (struct mcsetup_cmd_struct *) ptr;
			mc_cmd->cmd_status = 0;
			mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
			mc_cmd->cmd_link = 0xffff;
			mc_cmd->mc_cnt = num_addrs * 6;
			i = 0;
			netdev_for_each_mc_addr(dmi, dev)
				memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
			p->scb->cbl_offset = make16(mc_cmd);
			p->scb->cmd = CUC_START;
			elmc_id_attn586();
			s = jiffies;
			while (!(mc_cmd->cmd_status & STAT_COMPL)) {
				if (time_after(jiffies, s + 30*HZ/100))
					break;
			}
			if (!(mc_cmd->cmd_status & STAT_COMPL)) {
				pr_warning("%s: Can't apply multicast-address-list.\n", dev->name);
			}
		}
	}
	/*
	 * alloc xmit-buffs / init xmit_cmds
	 */
	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
		p->xmit_cbuffs[i] = (char *) ptr;	/* char-buffs */
		ptr = (char *) ptr + XMIT_BUFF_SIZE;
		p->xmit_buffs[i] = (struct tbd_struct *) ptr;	/* TBD */
		ptr = (char *) ptr + sizeof(struct tbd_struct);
		if ((void *) ptr > (void *) p->iscp) {
			pr_err("%s: not enough shared-mem for your configuration!\n", dev->name);
			return 1;
		}
		memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
		memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct));
		p->xmit_cmds[i]->cmd_status = STAT_COMPL;
		p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
		p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
		p->xmit_buffs[i]->next = 0xffff;
		p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
	}

	p->xmit_count = 0;
	p->xmit_last = 0;
#ifndef NO_NOPCOMMANDS
	p->nop_point = 0;
#endif

	/*
	 * 'start transmitter' (nop-loop)
	 */
#ifndef NO_NOPCOMMANDS
	p->scb->cbl_offset = make16(p->nop_cmds[0]);
	p->scb->cmd = CUC_START;
	elmc_id_attn586();
	WAIT_4_SCB_CMD();
#else
	p->xmit_cmds[0]->cmd_link = 0xffff;
	p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
#endif

	return 0;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JESD84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi && (card->host->caps & MMC_CAP_BKOPS)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HPI_MGMT, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			card->ext_csd.hpi_en = 1;
		}
	}

	/*
	 * Enable Background ops feature (if supported)
	 */
	if (card->ext_csd.bk_ops && (card->host->caps & MMC_CAP_BKOPS)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_EN, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling BK ops failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			card->ext_csd.bk_ops_en = 1;
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0]);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);
				/*
				 * If controller can't handle bus width test,
				 * use the highest bus width to maintain
				 * compatibility with previous MMC behavior.
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					break;
				err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1]);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
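
OCR bit 30 is used twice above: it is set in the mmc_send_op_cond() argument to advertise host support for high-capacity (sector-addressed) cards, and then tested in the returned rocr to see whether the card actually claimed that mode. The test in isolation, as a sketch:

/* A card that echoes OCR bit 30 uses sector addressing. */
static inline int card_is_sector_addressed(u32 rocr)
{
	return (rocr & (1 << 30)) != 0;
}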
Example #26
0
static irqreturn_t
elmc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned short stat;
	struct priv *p;

	if (!netif_running(dev)) {
		/* The 3c523 has this habit of generating interrupts during the
		   reset.  I'm not sure if the ni52 has this same problem, but it's
		   really annoying if we haven't finished initializing it.  I was
		   hoping all the elmc_id_* commands would disable this, but I
		   might have missed a few. */

		elmc_id_attn586();	/* ack inter. and disable any more */
		return IRQ_HANDLED;
	} else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
		/* wasn't this device */
		return IRQ_NONE;
	}
	/* reading ELMC_CTRL also clears the INT bit. */

	p = netdev_priv(dev);

	while ((stat = p->scb->status & STAT_MASK))
	{
		p->scb->cmd = stat;
		elmc_attn586();	/* ack inter. */

		if (stat & STAT_CX) {
			/* command with I-bit set complete */
			elmc_xmt_int(dev);
		}
		if (stat & STAT_FR) {
			/* received a frame */
			elmc_rcv_int(dev);
		}
#ifndef NO_NOPCOMMANDS
		if (stat & STAT_CNA) {
			/* CU went 'not ready' */
			if (netif_running(dev)) {
				pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n",
					dev->name, (int) stat, (int) p->scb->status);
			}
		}
#endif

		if (stat & STAT_RNR) {
			/* RU went 'not ready' */

			if (p->scb->status & RU_SUSPEND) {
				/* special case: RU_SUSPEND */

				WAIT_4_SCB_CMD();
				p->scb->cmd = RUC_RESUME;
				elmc_attn586();
			} else {
				pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",
					dev->name, (int) stat, (int) p->scb->status);
				elmc_rnr_int(dev);
			}
		}
		WAIT_4_SCB_CMD();	/* wait for ack. (elmc_xmt_int can be faster than ack!!) */
		if (p->scb->cmd) {	/* timed out? */
			break;
		}
	}
	return IRQ_HANDLED;
}
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;

		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * make sure boot cpu node_number is right, when boot cpu is on the
	 * node that doesn't have mem installed
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}
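
The offset bookkeeping at the end is the essence of per-CPU addressing: a static per-CPU symbol's link-time address plus per_cpu_offset(cpu) yields the address of that CPU's private copy. As plain arithmetic, outside the kernel's macro machinery (a sketch):

/* addr of a CPU's copy = symbol addr + (chunk base - template base)
 * + that CPU's unit offset inside the chunk */
static inline unsigned long percpu_addr(unsigned long sym,
		unsigned long delta, unsigned long unit_off)
{
	return sym + delta + unit_off;
}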
Example #28
0
static ssize_t pn544_dev_read(struct file *filp, char __user *buf,
		size_t count, loff_t *offset)
{
	struct pn544_dev *pn544_dev = filp->private_data;
	char tmp[MAX_BUFFER_SIZE];
	int ret;

	if (count > MAX_BUFFER_SIZE)
		count = MAX_BUFFER_SIZE;

	pr_debug("%s : reading %zu bytes.\n", __func__, count);
#ifdef FEATURE_PN544_KERNEL_LOG
    pr_info("==> %s #1 (ven, firm, irq)=(%d, %d, %d)\n", __func__, gpio_get_value(pn544_dev->ven_gpio), gpio_get_value(pn544_dev->firm_gpio), gpio_get_value(pn544_dev->irq_gpio));
#endif

	mutex_lock(&pn544_dev->read_mutex);

	if (!gpio_get_value(pn544_dev->irq_gpio)) {
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto fail;
		}

		pn544_dev->irq_enabled = true;

#ifdef READ_IRQ_MODIFY
		do_reading = 0;
#endif

		enable_irq(pn544_dev->client->irq);
#ifdef READ_IRQ_MODIFY
		ret = wait_event_interruptible(pn544_dev->read_wq, do_reading);
#else
		ret = wait_event_interruptible(pn544_dev->read_wq,
				gpio_get_value(pn544_dev->irq_gpio));
#endif
		pn544_disable_irq(pn544_dev);

#ifdef READ_IRQ_MODIFY
		if (cancle_read == true) {
			cancle_read = false;
			ret = -1;
			goto fail;
		}
#endif
		if (ret)
			goto fail;

	}

#ifdef FEATURE_PN544_KERNEL_LOG
    pr_info("==> %s #2 (ven, firm, irq)=(%d, %d, %d)\n", __func__, gpio_get_value(pn544_dev->ven_gpio), gpio_get_value(pn544_dev->firm_gpio), gpio_get_value(pn544_dev->irq_gpio));
#endif
	/* Read data */
	ret = i2c_master_recv(pn544_dev->client, tmp, count);
	mutex_unlock(&pn544_dev->read_mutex);

	if (ret < 0) {
		pr_err("%s: i2c_master_recv returned %d\n", __func__, ret);
		return ret;
	}
	if (ret > count) {
		pr_err("%s: received too many bytes from i2c (%d)\n",
			__func__, ret);
		return -EIO;
	}
	if (copy_to_user(buf, tmp, ret)) {
		pr_warning("%s : failed to copy to user space\n", __func__);
		return -EFAULT;
	}
	return ret;

fail:
	mutex_unlock(&pn544_dev->read_mutex);
	return ret;
}
int acpuclk_set_rate(int cpu, unsigned long rate, enum setrate_reason reason)
{
	struct clkctl_acpu_speed *tgt_s, *strt_s;
	int res, rc = 0;

	if (reason == SETRATE_CPUFREQ)
		mutex_lock(&drv_state.lock);

	strt_s = drv_state.current_speed;

	if (rate == strt_s->acpuclk_khz)
		goto out;

	for (tgt_s = acpu_freq_tbl; tgt_s->acpuclk_khz != 0; tgt_s++)
		if (tgt_s->acpuclk_khz == rate)
			break;

	if (tgt_s->acpuclk_khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	if (reason == SETRATE_CPUFREQ) {
		/* Increase VDD if needed. */
		if (tgt_s->vdd > strt_s->vdd) {
			rc = acpuclk_set_vdd_level(tgt_s->vdd);
			if (rc) {
				pr_err("Unable to increase ACPU vdd (%d)\n",
					rc);
				goto out;
			}
		}
	}

	dprintk("Switching from ACPU rate %u KHz -> %u KHz\n",
		strt_s->acpuclk_khz, tgt_s->acpuclk_khz);

	if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
		select_clk_source(tgt_s);
		/* Select core source because target may be AXI. */
		select_core_source(tgt_s->core_src_sel);
	} else if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll == ACPU_PLL_3) {
		scpll_enable(1, tgt_s);
		mb();
		select_core_source(tgt_s->core_src_sel);
	} else if (strt_s->pll == ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
		select_clk_source(tgt_s);
		select_core_source(tgt_s->core_src_sel);
		mb();
		scpll_enable(0, NULL);
	} else {
		scpll_change_freq(tgt_s->l_value);
	}

	/* Update the driver state with the new clock freq */
	drv_state.current_speed = tgt_s;

	/* Re-adjust lpj for the new clock speed. */
	loops_per_jiffy = tgt_s->lpj;

	/* Nothing else to do for SWFI. */
	if (reason == SETRATE_SWFI)
		goto out;

	if (strt_s->ebi1clk_khz != tgt_s->ebi1clk_khz) {
		res = ebi1_clk_set_min_rate(CLKVOTE_ACPUCLK,
			tgt_s->ebi1clk_khz * 1000);
		if (res < 0)
			pr_warning("Setting EBI1/AXI min rate failed (%d)\n",
									res);
	}

	/* Nothing else to do for power collapse */
	if (reason == SETRATE_PC)
		goto out;

	/* Drop VDD level if we can. */
	if (tgt_s->vdd < strt_s->vdd) {
		res = acpuclk_set_vdd_level(tgt_s->vdd);
		if (res)
			pr_warning("Unable to drop ACPU vdd (%d)\n", res);
	}

	dprintk("ACPU speed change complete\n");
out:
	if (reason == SETRATE_CPUFREQ)
		mutex_unlock(&drv_state.lock);
	return rc;
}
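
acpuclk_set_rate() follows the standard DVFS ordering: raise VDD before raising the clock, and lower VDD only after the clock has dropped, so the core is never clocked faster than its current voltage supports. Condensed into a sketch (set_vdd() and set_clk() are hypothetical helpers):

/* DVFS rule: voltage up before frequency up; frequency down before
 * voltage down. Failures on the way up abort; drops are best-effort. */
static int dvfs_switch(int cur_vdd, int tgt_vdd, unsigned long tgt_khz)
{
	int rc = 0;

	if (tgt_vdd > cur_vdd) {
		rc = set_vdd(tgt_vdd);	/* raise voltage first */
		if (rc)
			return rc;	/* stay at the old, safe point */
	}
	set_clk(tgt_khz);		/* safe at the higher voltage */
	if (tgt_vdd < cur_vdd)
		rc = set_vdd(tgt_vdd);	/* drop voltage last */
	return rc;
}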
Example #30
0
struct net_device *__init
c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
            int irq0, int irq1)
{
    struct net_device *ndev;
    ci_t       *ci;

    ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
    if (!ndev)
    {
        pr_warning("%s: no memory for struct net_device !\n", hi->devname);
        error_flag = ENOMEM;
        return NULL;
    }
    ci = (ci_t *)(netdev_priv(ndev));
    ndev->irq = irq0;

    ci->hdw_info = hi;
    ci->state = C_INIT;         /* mark as hardware not available */
    ci->next = c4_list;
    c4_list = ci;
    ci->brdno = ci->next ? ci->next->brdno + 1 : 0;

    if (CI == 0)
        CI = ci;                    /* DEBUG, only board 0 usage */

    strcpy (ci->devname, hi->devname);
    ci->release = &pmcc4_OSSI_release[0];

    /* tasklet */
#if defined(SBE_ISR_TASKLET)
    tasklet_init (&ci->ci_musycc_isr_tasklet,
                  (void (*) (unsigned long)) musycc_intr_bh_tasklet,
                  (unsigned long) ci);

    if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0)
        tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
    ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet;
    ci->ci_musycc_isr_tq.data = ci;
#endif


    if (register_netdev (ndev) ||
        (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS))
    {
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = ENODEV;
        return NULL;
    }
    /*************************************************************
     *  int request_irq(unsigned int irq,
     *                  void (*handler)(int, void *, struct pt_regs *),
     *                  unsigned long flags, const char *dev_name, void *dev_id);
     *  wherein:
     *  irq      -> The interrupt number that is being requested.
     *  handler  -> Pointer to handling function being installed.
     *  flags    -> A bit mask of options related to interrupt management.
     *  dev_name -> String used in /proc/interrupts to show owner of interrupt.
     *  dev_id   -> Pointer (for shared interrupt lines) to point to its own
     *              private data area (to identify which device is interrupting).
     *
     *  extern void free_irq(unsigned int irq, void *dev_id);
     **************************************************************/

    if (request_irq (irq0, &c4_linux_interrupt,
                     IRQF_SHARED,
                     ndev->name, ndev))
    {
        pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
        unregister_netdev (ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = EIO;
        return NULL;
    }
#ifdef CONFIG_SBE_PMCC4_NCOMM
    if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
    {
        pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
        unregister_netdev (ndev);
        free_irq (irq0, ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = EIO;
        return NULL;
    }
#endif

    /* setup board identification information */

    {
        u_int32_t   tmp;

        hdw_sn_get (hi, brdno);     /* also sets PROM format type (promfmt)
                                     * for later usage */

        switch (hi->promfmt)
        {
        case PROM_FORMAT_TYPE1:
            memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
            memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4);     /* unaligned data
                                                                         * acquisition */
            ci->brd_id = cpu_to_be32 (tmp);
            break;
        case PROM_FORMAT_TYPE2:
            memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
            memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4);     /* unaligned data
                                                                         * acquisition */
            ci->brd_id = cpu_to_be32 (tmp);
            break;
        default:
            ci->brd_id = 0;
            memset (ndev->dev_addr, 0, 6);
            break;
        }

#if 1
        sbeid_set_hdwbid (ci);      /* requires bid to be preset */
#else
        sbeid_set_bdtype (ci);      /* requires hdw_bid to be preset */
#endif

    }

#ifdef CONFIG_PROC_FS
    sbecom_proc_brd_init (ci);
#endif
#if defined(SBE_ISR_TASKLET)
    tasklet_enable (&ci->ci_musycc_isr_tasklet);
#endif


    if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS)
    {
#ifdef CONFIG_PROC_FS
        sbecom_proc_brd_cleanup (ci);
#endif
        unregister_netdev (ndev);
        free_irq (irq1, ndev);
        free_irq (irq0, ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        return NULL;                /* failure, error_flag is set */
    }
    return ndev;
}