Example #1
0
static inline const char *get_task_state(struct task_struct *tsk)
{
	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != ARRAY_SIZE(task_state_array));
	return task_state_array[task_state_index(tsk)];
}
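
A standalone sketch of the invariant that BUILD_BUG_ON enforces above: when the limit is a power of two, 1 + ilog2(limit) is exactly the number of table slots needed for indices 0..log2(limit). All names below (MY_REPORT_MAX, my_state_array) are hypothetical, and ILOG2_CONST stands in for the kernel's ilog2() (GCC/Clang builtin assumed):

#include <assert.h>                              /* static_assert (C11) */

#define MY_REPORT_MAX (1u << 3)                  /* hypothetical limit: 8 */
#define ILOG2_CONST(x) (31 - __builtin_clz(x))   /* floor(log2(x)) for constants */

static const char *const my_state_array[] = { "R", "S", "D", "T" };  /* 4 entries */

/* 1 + ilog2(8) == 4 == number of entries: indices 0..3 are all covered */
static_assert(1 + ILOG2_CONST(MY_REPORT_MAX) ==
              sizeof(my_state_array) / sizeof(my_state_array[0]),
              "state array must have one entry per reportable index");
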
/**
 * Starts the watchdog
 * @param timeout - is in either ms or ticks
 */
static void StartWatchdog(uint32 timeout)
{
    /*lint -save -e923 Ok, that casting is done in Freescale header file  */
#if defined(CFG_MPC5567)
	(void)timeout;
	ECSM.SWTCR.R =  0x00D8;
#elif defined(CFG_MPC563XM)
	SWT.TO.R = timeout;/* Timeout in ticks */
	SWT.CR.R = 0x8000011BuL;
#elif defined(CFG_MPC5777M)
    SWT.CR.R = 0xE000011BuL; /* Access for all cores */
#elif defined(CFG_MPC5644A)
	SWT.TO.R = timeout;/* Timeout in ticks */
	SWT.MCR.R = 0x8000011BuL;
#elif defined(CFG_MPC5744P) || defined(CFG_MPC5645S) || defined(CFG_MPC5643L) || defined(CFG_SPC56XL70) || defined(CFG_MPC560X)
    /* Clear soft lock */
	SWT.SR.R = SOFTLOCK_WORD_1;     /* Write keys to clear soft lock bit */
    SWT.SR.R = SOFTLOCK_WORD_2;
#if defined(CFG_MPC5645S) || defined(CFG_MPC560X)
    /* Clocked by 128 kHz IRC clock */
    SWT.TO.R = timeout * 128u; /* Timeout in ms */
#else
    /* Clocked by 16 MHz IRC clock */
	SWT.TO.R = timeout * 16000u; /* Timeout in ms */
#endif
    /* Enable Watchdog */

	/*lint -e{970} Lint does not seem handle typeof well */
    SWT.CR.B = (typeof(SWT.CR.B)){.RIA = 1, .SLK = 1, .FRZ = 1, .WEN = 1, .MAP0 = 1};
#elif defined(CFG_MPC5668)
	/* Clocked by 16 MHz IRC clock */

	/* Clear softlock */
	WRITE32(SWT_BASE + SWT_SR, SOFTLOCK_WORD_1);
	WRITE32(SWT_BASE + SWT_SR, SOFTLOCK_WORD_2);

	/* Write TMO */
	WRITE32(SWT_BASE + SWT_TO, timeout * 16000 ); /* Timeout in ms */

	/* Enable Watchdog */
	WRITE32(SWT_BASE + SWT_CR,0x80000000UL + CR_RIA + CR_SLK + CR_CSL + CR_FRZ + CR_WEN);

#elif defined(CFG_MPC5516)
	/* We are running on the system clock, i.e. SIU_SYSCLK.SWTCLKSEL, so get its value */

	/* The timeout is 2^x, so get the best possible value
	 *   Table for 80 MHz
	 *   ------------------
	 *   2^9  = 512 = 6.4 us
	 *   2^15 =       400 us
	 *   2^20 =       13 ms
	 *   2^28 =       3.3 s
	 *   2^31 =       26.84 s
	 *
	 * Formula:
	 *   1/clock * 2^n  = tmo_in_s
	 *   2^n = tmo_in_s * clock -> n = log2(tmo_in_s * clock) = log2(tmo_in_ms * clock / 1000 )
	 *  */
	uint32 swtVal = ilog2( Mcu_Arc_GetSystemClock()/1000 * timeout ); /* Timeout in ms */
#if defined(CFG_WDG_TEST)
	MCM.SWTCR.R = (SWTCR_SWE + SWTCR_SWRI(WDG_SWRI_VAL) + swtVal);
#else
	MCM.SWTCR.R = (SWTCR_SWE + SWTCR_SWRI(2) + swtVal);
#endif

#else
	MCM.SWTCR.R = 0x00D8u;
#endif
	/*lint -restore */
}
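
A worked number for the MPC5516 branch above (a standalone sketch, not driver code; the 80 MHz clock and 100 ms timeout are assumed values): n = ilog2(clock / 1000 * timeout_ms), and the watchdog fires after 2^n ticks. Because ilog2() floors, the realized timeout can be up to a factor of two shorter than requested.

#include <stdio.h>

static unsigned ilog2_u(unsigned long x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned long clock = 80000000UL;  /* 80 MHz system clock (assumed) */
	unsigned long tmo_ms = 100;        /* requested timeout (assumed) */
	unsigned n = ilog2_u(clock / 1000 * tmo_ms);            /* ilog2(8000000) = 22 */
	double actual_ms = (double)(1UL << n) / clock * 1000.0; /* ~52.4 ms */
	printf("n=%u, actual timeout %.1f ms\n", n, actual_ms);
	return 0;
}
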


/**
 * Stops the watchdog
 */
static void StopWatchdog(void)
{
    /*lint -save -e923 Ok, the casting is done in Freescale header file  */
#if defined(CFG_MPC5567)
 	ECSM.SWTCR.R =  0x0059u;
#elif defined(CFG_MPC560X) || defined(CFG_MPC5668) || defined(CFG_MPC563XM) || defined(CFG_MPC5744P) || defined(CFG_MPC5645S) || defined(CFG_MPC5643L) || defined(CFG_SPC56XL70)
 	SWT.SR.R = SOFTLOCK_WORD_1;     /* Write keys to clear soft lock bit */
 	SWT.SR.R = SOFTLOCK_WORD_2;
 	/*lint -e{970} Lint does not seem handle typeof well */
 	SWT.CR.B = (typeof(SWT.CR.B)){.RIA = 1, .FRZ = 1, .WEN = 0, .MAP0 = 1};
#elif defined(CFG_MPC5777M)
    SWT.SR.R = SOFTLOCK_WORD_1;     /* Write keys to clear soft lock bit */
    SWT.SR.R = SOFTLOCK_WORD_2;
    SWT.CR.R = 0xE000010AuL; /* Access for all cores */
#elif defined(CFG_MPC5644A)
    SWT.SR.R = SOFTLOCK_WORD_1;     /* Write keys to clear soft lock bit */
    SWT.SR.R = SOFTLOCK_WORD_2;
    SWT.MCR.B = (typeof(SWT.MCR.B)){.RIA = 1, .FRZ = 1, .WEN = 0, .MAP0 = 1};

#elif defined(CFG_MPC5516)
 	MCM.SWTCR.R = 0x0u;	  /* Disable the watchdog */
#else
	MCM.SWTCR.R = 0x0059u;
#endif
	/*lint -restore */
}


#if (WDG_VERSION_INFO_API == STD_ON)
void Wdg_GetVersionInfo(Std_VersionInfoType* versioninfo)
{
	/* @req SWS_Wdg_00174 */
    VALIDATE(versioninfo != NULL, WDG_GET_VERSION_INFO_SERVICE_ID, WDG_E_PARAM_POINTER);

    versioninfo->vendorID = WDG_VENDOR_ID;
    versioninfo->moduleID = WDG_MODULE_ID;
    versioninfo->sw_major_version = WDG_SW_MAJOR_VERSION;
    versioninfo->sw_minor_version = WDG_SW_MINOR_VERSION;
    versioninfo->sw_patch_version = WDG_SW_PATCH_VERSION;
}
#endif


void Wdg_Init (const Wdg_ConfigType* ConfigPtr)
{
    /* @req SWS_Wdg_00001 */
    /* @req SWS_Wdg_00100 */
    /* @req SWS_Wdg_00101 */

    /*@req SWS_Wdg_00089 */
    VALIDATE(ConfigPtr != NULL, WDG_INIT_SERVICE_ID, WDG_E_PARAM_POINTER);

    /* @req SWS_Wdg_00010 */
    VALIDATE(runTimeData.configWdgPtr == NULL, WDG_INIT_SERVICE_ID, WDG_E_DRIVER_STATE);

    /* Keep a pointer to the config. */
    /* @req SWS_Wdg_00019 Always set regardless of DET or debugging support */
    runTimeData.configWdgPtr = ConfigPtr;

	/* Ignoring return value, DET error set in Wdg_SetMode */
    (void)Wdg_SetMode(ConfigPtr->Wdg_ModeConfig->Wdg_DefaultMode);
}
Example #3
0
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}
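
A side note on the bound checked above: ilog2(256 * 1024) evaluates to 18, so TCA_FQ_BUCKETS_LOG accepts 1..18, i.e. a flow hash table of 2 up to 262144 buckets. A minimal standalone check, with a local helper in place of the kernel's ilog2():

#include <stdio.h>

static unsigned ilog2_u(unsigned long x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned max_log = ilog2_u(256 * 1024);
	printf("max buckets_log = %u, max buckets = %lu\n",
	       max_log, 1UL << max_log);   /* 18, 262144 */
	return 0;
}
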
Example #4
0
int
zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent)
{
	const char *osname = zmo->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, zmo, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);
	zsb->z_bdi.ra_pages = 0;
	sb->s_bdi = &zsb->z_bdi;

	error = -zpl_bdi_setup_and_register(&zsb->z_bdi, "zfs");
	if (error)
		goto out;

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
			goto out;
		acltype_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;
		zsb->z_snap_defer_time = jiffies;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	} else {
		if ((error = zfs_sb_setup(zsb, B_TRUE)))
			goto out;
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = SET_ERROR(ENOMEM);
		goto out;
	}

	if (!zsb->z_issnap)
		zfsctl_create(zsb);

	zsb->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
	if (error) {
		dmu_objset_disown(zsb->z_os, zsb);
		zfs_sb_free(zsb);
		/*
		 * make sure we don't have dangling sb->s_fs_info which
		 * zfs_preumount will use.
		 */
		sb->s_fs_info = NULL;
	}

	return (error);
}
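
One detail in the mount path above: sb->s_blocksize_bits = ilog2(recordsize) only round-trips (1 << bits == recordsize) because ZFS record sizes are powers of two; a non-power-of-two value would be silently rounded down. A standalone check with a local stand-in for the kernel's ilog2():

#include <stdio.h>

static unsigned ilog2_u(unsigned long x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned long recordsize = 131072;       /* 128 KiB, the ZFS default */
	unsigned bits = ilog2_u(recordsize);     /* 17 */
	printf("bits=%u, round-trip %s\n", bits,
	       (1UL << bits) == recordsize ? "ok" : "lossy");
	return 0;
}
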
Example #5
0
void *fft_new(Symbol *s, short ac, Atom *av)
{
	t_fft *x;
	float *buf;
	long i;
	long bs = sys_getblksize();
	
	long fftsize, hop;
	
	if ((ac > 0) && (av[0].a_type == A_LONG))
		fftsize = av[0].a_w.w_long;
	else
		fftsize = FFT_DEFAULT_POINTS;
		
	if (fftsize != (1 << ilog2(fftsize))) {
		fftsize = 1 << ilog2(fftsize);
		error("fftÅ: power of two required for fft size - using %ld", fftsize);
	}
	
	if ((ac > 1) && (av[1].a_type == A_LONG))
		hop = av[1].a_w.w_long;
	else
		hop = 2;
	
	if ( hop != 2 ) {
		error("fftÅ: overlap must be 2 or 4 right now - setting to 2");
		hop = 2;
	}
	
	if (fftsize < FFT_MIN_POINTS)
		error("fftÅ: minimum size %ld", fftsize = FFT_MIN_POINTS);
	else if (fftsize > FFT_MAX_POINTS)
		error("fftÅ: maximum size %ld", fftsize = FFT_MAX_POINTS);

	hop = fftsize / hop; // hop is now defined in samples instead of x overlap
	
	// this is not ideal, because overlap should be 2, 4 or 8
	if (hop < bs){
		hop = bs;
		error("fftÅ: HOP must be multiple of %ld, setting to %ld", bs,hop);
	}
	
	x = (t_fft *)newobject(fft_class);
	x->x_fftsize = fftsize;
	x->x_hop = hop;
	
	dsp_setup((t_pxobject *)x,2);
	x->x_obj.z_misc = Z_NO_INPLACE;
	x->x_realin = t_getbytes(fftsize * sizeof(float));
	x->x_imagin = t_getbytes(fftsize * sizeof(float));
	x->x_realout = t_getbytes(fftsize * sizeof(float));
	x->x_imagout = t_getbytes(fftsize * sizeof(float));
	x->x_window = t_getbytes(fftsize * sizeof(float));
	
	for (i=0; i < fftsize; i++) {
		x->x_realin[i] = 0;
		x->x_realout[i] = 0;
		x->x_imagin[i] = 0;
		x->x_imagout[i] = 0;
	}
	for (i=0; i < fftsize; i++) { 
		// hanning window with sqrt for two overlap
		x->x_window[i] = sqrt(0.5 * (1. + cos(3.14159 + 3.14159 * 2.* i/fftsize)));
	}
	x->x_realinptr = x->x_realin;
	x->x_realoutptr = x->x_realout;
	x->x_imaginptr = x->x_imagin;
	x->x_imagoutptr = x->x_imagout;

	x->x_1overpts = 1. / x->x_fftsize;
	
	//x->x_outlet2 = outlet_new((t_pxobject *)x, 0L);
	x->x_outlet1 = outlet_new(x, "signal");
	x->x_outlet = outlet_new(x, "signal");
	
	return (x);
}
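
The power-of-two clamp used above, stated on its own: for x > 0, x == (1 << ilog2(x)) holds exactly when x is a power of two; otherwise 1 << ilog2(x) rounds x down to the nearest power of two. A standalone check:

#include <stdio.h>

static long ilog2_l(long x)   /* floor(log2(x)), x > 0 */
{
	long n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	long sizes[] = { 1000, 1024, 1500, 2048 };
	for (int i = 0; i < 4; i++) {
		long p = 1L << ilog2_l(sizes[i]);
		printf("%ld -> %ld%s\n", sizes[i], p,
		       sizes[i] == p ? " (already a power of two)" : "");
	}
	return 0;   /* 1000 -> 512, 1024 -> 1024, 1500 -> 1024, 2048 -> 2048 */
}
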
Example #6
0
void Sample_TileMesh::handleSettings()
{
	Sample::handleCommonSettings();

	if (imguiCheck("Keep Itermediate Results", m_keepInterResults))
		m_keepInterResults = !m_keepInterResults;

	if (imguiCheck("Build All Tiles", m_buildAll))
		m_buildAll = !m_buildAll;
	
	imguiLabel("Tiling");
	imguiSlider("TileSize", &m_tileSize, 16.0f, 1024.0f, 16.0f);
	
	if (m_geom)
	{
		const float* bmin = m_geom->getMeshBoundsMin();
		const float* bmax = m_geom->getMeshBoundsMax();
		char text[64];
		int gw = 0, gh = 0;
		rcCalcGridSize(bmin, bmax, m_cellSize, &gw, &gh);
		const int ts = (int)m_tileSize;
		const int tw = (gw + ts-1) / ts;
		const int th = (gh + ts-1) / ts;
		snprintf(text, 64, "Tiles  %d x %d", tw, th);
		imguiValue(text);

		// Max tiles and max polys affect how the tile IDs are calculated.
		// There are 22 bits available for identifying a tile and a polygon.
		int tileBits = rcMin((int)ilog2(nextPow2(tw*th)), 14);
		if (tileBits > 14) tileBits = 14;
		int polyBits = 22 - tileBits;
		m_maxTiles = 1 << tileBits;
		m_maxPolysPerTile = 1 << polyBits;
		snprintf(text, 64, "Max Tiles  %d", m_maxTiles);
		imguiValue(text);
		snprintf(text, 64, "Max Polys  %d", m_maxPolysPerTile);
		imguiValue(text);
	}
	else
	{
		m_maxTiles = 0;
		m_maxPolysPerTile = 0;
	}
	
	imguiSeparator();
	
	imguiIndent();
	imguiIndent();
	
	if (imguiButton("Save"))
	{
		saveAll("all_tiles_navmesh.bin", m_navMesh);
	}

	if (imguiButton("Load"))
	{
		dtFreeNavMesh(m_navMesh);
		m_navMesh = loadAll("all_tiles_navmesh.bin");
		m_navQuery->init(m_navMesh, 2048);
	}

	imguiUnindent();
	imguiUnindent();
	
	char msg[64];
	snprintf(msg, 64, "Build Time: %.1fms", m_totalBuildTimeMs);
	imguiLabel(msg);
	
	imguiSeparator();
	
	imguiSeparator();
	
}
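
A worked instance of the 22-bit tile/poly ID split above, as a standalone sketch; nextPow2_u and ilog2_u are local stand-ins for the sample's helpers, and the 50 x 50 grid is an assumed value. 2500 tiles round up to 4096, giving 12 tile bits and 22 - 12 = 10 poly bits:

#include <stdio.h>

static unsigned nextPow2_u(unsigned v)   /* round up to the next power of two */
{
	v--;
	v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
	return v + 1;
}

static unsigned ilog2_u(unsigned v)      /* floor(log2(v)), v > 0 */
{
	unsigned n = 0;
	while (v >>= 1)
		n++;
	return n;
}

int main(void)
{
	const int tw = 50, th = 50;                        /* assumed tile grid */
	int tileBits = (int)ilog2_u(nextPow2_u(tw * th));  /* 2500 -> 4096 -> 12 */
	if (tileBits > 14)
		tileBits = 14;                             /* tile IDs capped at 14 bits */
	int polyBits = 22 - tileBits;                      /* 10 bits left for polys */
	printf("maxTiles=%d maxPolysPerTile=%d\n",
	       1 << tileBits, 1 << polyBits);              /* 4096, 1024 */
	return 0;
}
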
Example #7
0
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (cq->cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
	if (err)
		goto err_put;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_cmpt_put;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_cmpt_put:
	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);

err_put:
	mlx4_table_put(dev, &cq_table->table, cq->cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);

	return err;
}
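
The logsize_usrpage packing above, in isolation: the CQ entry count is stored as its log2 in the top byte, with the UAR index in the low bits; this presumes nent is a power of two. A standalone sketch (values assumed, field layout as used above, not re-checked against the hardware documentation):

#include <stdio.h>
#include <stdint.h>

static unsigned ilog2_u(uint32_t x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t nent = 1024;       /* assumed CQ size (power of two) */
	uint32_t uar_index = 7;     /* assumed UAR index */
	uint32_t logsize_usrpage = (ilog2_u(nent) << 24) | uar_index;
	printf("logsize_usrpage = 0x%08x\n", logsize_usrpage);  /* 0x0a000007 */
	return 0;
}
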
Exemple #8
0
// leaf 0x0000000B //
void bx_generic_cpuid_t::get_std_cpuid_extended_topology_leaf(Bit32u subfunction, cpuid_function_t *leaf) const
{
  // CPUID function 0x0000000B - Extended Topology Leaf
  leaf->eax = 0;
  leaf->ebx = 0;
  leaf->ecx = subfunction;
  leaf->edx = cpu->get_apic_id();

#if BX_SUPPORT_SMP
  switch(subfunction) {
  case 0:
     if (nthreads > 1) {
        leaf->eax = ilog2(nthreads-1)+1;
        leaf->ebx = nthreads;
        leaf->ecx |= (1<<8);
     }
     else if (ncores > 1) {
        leaf->eax = ilog2(ncores-1)+1;
        leaf->ebx = ncores;
        leaf->ecx |= (2<<8);
     }
     else if (nprocessors > 1) {
        leaf->eax = ilog2(nprocessors-1)+1;
        leaf->ebx = nprocessors;
     }
     else {
        leaf->eax = 1;
        leaf->ebx = 1; // number of logical CPUs at this level
     }
     break;

  case 1:
     if (nthreads > 1) {
        if (ncores > 1) {
           leaf->eax = ilog2(ncores-1)+1;
           leaf->ebx = ncores;
           leaf->ecx |= (2<<8);
        }
        else if (nprocessors > 1) {
           leaf->eax = ilog2(nprocessors-1)+1;
           leaf->ebx = nprocessors;
        }
     }
     else if (ncores > 1) {
        if (nprocessors > 1) {
           leaf->eax = ilog2(nprocessors-1)+1;
           leaf->ebx = nprocessors;
        }
     }
     break;

  case 2:
     if (nthreads > 1) {
        if (nprocessors > 1) {
           leaf->eax = ilog2(nprocessors-1)+1;
           leaf->ebx = nprocessors;
        }
     }
     break;

  default:
     break;
  }
#endif
}
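
The expression ilog2(n - 1) + 1 recurring in the leaf above is ceil(log2(n)) for n >= 2: the number of APIC-ID bits needed to distinguish n logical units at a topology level. A quick standalone check with a local helper in place of Bochs' ilog2():

#include <stdio.h>

static unsigned ilog2_u(unsigned x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	for (unsigned n = 2; n <= 8; n++)
		printf("n=%u -> %u bits\n", n, ilog2_u(n - 1) + 1);
	/* n=2 -> 1, n=3..4 -> 2, n=5..8 -> 3 */
	return 0;
}
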
Example #9
0
     * Use our actual MCK to figure out how many MCK/16 ticks per
     * 1/HZ period (instead of a compile-time constant LATCH).
     */
    pit_rate = clk_get_rate(clk_get(NULL, "mck")) / 16;
    pit_cycle = (pit_rate + HZ/2) / HZ;
    WARN_ON(((pit_cycle - 1) & ~AT91_PIT_PIV) != 0);

    /* Initialize and enable the timer */
    at91sam926x_pit_reset();

    /*
     * Register clocksource.  The high order bits of PIV are unused,
     * so this isn't a 32-bit counter unless we get clockevent irqs.
     */
    bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */;
    pit_clk.mask = CLOCKSOURCE_MASK(bits);
    clocksource_register_hz(&pit_clk, pit_rate);

    /* Set up irq handler */
    setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);

    /* Set up and register clockevents */
    pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
    pit_clkevt.cpumask = cpumask_of(0);
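
To make the clocksource width above concrete (assumed numbers): with a 132 MHz MCK, pit_rate = 132 MHz / 16 = 8.25 MHz; at HZ = 100, pit_cycle is about 82500, so ilog2(pit_cycle) = 16 and the counter is treated as 12 + 16 = 28 bits wide. A standalone sketch:

#include <stdio.h>

static unsigned ilog2_u(unsigned long x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned long mck = 132000000UL;                     /* assumed master clock */
	unsigned long hz = 100;                              /* assumed HZ */
	unsigned long pit_rate = mck / 16;                   /* 8250000 */
	unsigned long pit_cycle = (pit_rate + hz / 2) / hz;  /* 82500 */
	printf("bits = %u\n", 12 + ilog2_u(pit_cycle));      /* 28 */
	return 0;
}
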
Example #10
0
MVSuper::MVSuper(PClip _child, int _hPad, int _vPad, int _pel, int _levels, bool _chroma, int _sharp, int _rfilter, PClip _pelclip,
					 bool _isse, bool _planar, IScriptEnvironment* env) :
GenericVideoFilter(_child),
pelclip(_pelclip)
{
    planar = _planar;

	nWidth = vi.width;

	nHeight = vi.height;

	if (!vi.IsYV12() && !vi.IsYUY2())
		env->ThrowError("MSuper: Clip must be YV12 or YUY2");

   nPel = _pel;
	if (( nPel != 1 ) && ( nPel != 2 ) && ( nPel != 4 ))
		env->ThrowError("MSuper: pel has to be 1 or 2 or 4");

   nHPad = _hPad;
   nVPad = _vPad;
   rfilter = _rfilter;
	sharp = _sharp; // pel2 interpolation type
	isse = _isse;

	chroma = _chroma;
    nModeYUV = chroma ? YUVPLANES : YPLANE;

    pixelType = vi.pixel_type;
	yRatioUV = (vi.IsYV12()) ? 2 : 1;
	xRatioUV = 2; // for YV12 and YUY2; not really used, assumed to be 2

    nLevels = _levels;
	int minsize = (nWidth/xRatioUV < nHeight/yRatioUV) ? nWidth/xRatioUV : nHeight/yRatioUV;
	int nLevelsMax = ilog2(minsize);
	if (nLevels<=0 || nLevels> nLevelsMax) nLevels = nLevelsMax;

	usePelClip = false;
   if (pelclip && (nPel >= 2))
   {
		if (pelclip->GetVideoInfo().width == vi.width*nPel &&
		    pelclip->GetVideoInfo().height == vi.height*nPel)
		{
			usePelClip = true;
			isPelClipPadded = false;
		}
		else if (pelclip->GetVideoInfo().width == (vi.width + nHPad*2)*nPel &&
                 pelclip->GetVideoInfo().height == (vi.height+ nVPad*2)*nPel)
		{
			usePelClip = true;
			isPelClipPadded = true;
		}
		else
			env->ThrowError("MSuper: pelclip frame size must be Pel of source!");
   }

    nSuperWidth = nWidth + 2*nHPad;
    nSuperHeight = PlaneSuperOffset(nHeight, nLevels, nPel, nVPad, nSuperWidth)/nSuperWidth;
    if (yRatioUV==2 && nSuperHeight&1) nSuperHeight++; // even
    vi.width = nSuperWidth;
    vi.height = nSuperHeight;

   if ( (pixelType & VideoInfo::CS_YUY2) == VideoInfo::CS_YUY2 && !planar)
   {
		SrcPlanes =  new YUY2Planes(nWidth, nHeight);
//		DstPlanes =  new YUY2Planes(nSuperWidth, nSuperHeight); // other size!
		if (usePelClip)
		{
			SrcPelPlanes =  new YUY2Planes(pelclip->GetVideoInfo().width, pelclip->GetVideoInfo().height);
		}
   }

    SuperParams64Bits params;

    params.nHeight = nHeight;
    params.nHPad = nHPad;
    params.nVPad = nVPad;
    params.nPel = nPel;
    params.nModeYUV = nModeYUV;
    params.nLevels = nLevels;


    // pack parameters to fake audio properties
    memcpy(&vi.num_audio_samples, &params, 8); //nHeight + (nHPad<<16) + (nVPad<<24) + ((_int64)(nPel)<<32) + ((_int64)nModeYUV<<40) + ((_int64)nLevels<<48);
    vi.audio_samples_per_second = 0; // kill audio

//    pSrcGOF = new MVGroupOfFrames(nLevels, nWidth, nHeight, nPel, nHPad, nVPad, nModeYUV, isse, yRatioUV);
    pSrcGOF = new MVGroupOfFrames(nLevels, nWidth, nHeight, nPel, nHPad, nVPad, YUVPLANES, isse, yRatioUV);

   PROFILE_INIT();

}
Example #11
0
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
    struct buffer_head * bh;
    struct ext2_sb_info * sbi;
    struct ext2_super_block * es;
    struct inode *root;
    unsigned long block;
    unsigned long sb_block = get_sb_block(&data);
    unsigned long logic_sb_block;
    unsigned long offset = 0;
    unsigned long def_mount_opts;
    int blocksize = BLOCK_SIZE;
    int db_count;
    int i, j;
    __le32 features;

    sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    sb->s_fs_info = sbi;

    /*
     * See what the current blocksize for the device is, and
     * use that as the blocksize.  Otherwise (or if the blocksize
     * is smaller than the default) use the default.
     * This is important for devices that have a hardware
     * sectorsize that is larger than the default.
     */
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        printk ("EXT2-fs: unable to set blocksize\n");
        goto failed_sbi;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        printk ("EXT2-fs: unable to read superblock\n");
        goto failed_sbi;
    }
    /*
     * Note: s_es must be initialized as soon as possible because
     *       some ext2 macro-instructions depend on its value
     */
    es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
    sbi->s_es = es;
    sb->s_magic = le16_to_cpu(es->s_magic);

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    /* Set defaults before we parse the mount options */
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
    if (def_mount_opts & EXT2_DEFM_DEBUG)
        set_opt(sbi->s_mount_opt, DEBUG);
    if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
        set_opt(sbi->s_mount_opt, GRPID);
    if (def_mount_opts & EXT2_DEFM_UID16)
        set_opt(sbi->s_mount_opt, NO_UID32);
    if (def_mount_opts & EXT2_DEFM_XATTR_USER)
        set_opt(sbi->s_mount_opt, XATTR_USER);
    if (def_mount_opts & EXT2_DEFM_ACL)
        set_opt(sbi->s_mount_opt, POSIX_ACL);

    if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
        set_opt(sbi->s_mount_opt, ERRORS_PANIC);
    else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
        set_opt(sbi->s_mount_opt, ERRORS_RO);
    else
        set_opt(sbi->s_mount_opt, ERRORS_CONT);

    sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
    sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

    if (!parse_options ((char *) data, sbi))
        goto failed_mount;

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                  ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
                   MS_POSIXACL : 0);

    ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
            (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
             EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
        printk("EXT2-fs warning: feature flags set on rev 0 fs, "
               "running e2fsck is recommended\n");
    /*
     * Check feature flags regardless of the revision level, since we
     * previously didn't change the revision level when setting the flags,
     * so there is a chance incompat flags are set on a rev 0 filesystem.
     */
    features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
    if (features) {
        printk("EXT2-fs: %s: couldn't mount because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }
    if (!(sb->s_flags & MS_RDONLY) &&
            (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))) {
        printk("EXT2-fs: %s: couldn't mount RDWR because of "
               "unsupported optional features (%x).\n",
               sb->s_id, le32_to_cpu(features));
        goto failed_mount;
    }

    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

    if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) ||
                               (sb->s_blocksize != blocksize))) {
        if (!silent)
            printk("XIP: Unsupported blocksize\n");
        goto failed_mount;
    }

    /* If the blocksize doesn't match, re-read the thing.. */
    if (sb->s_blocksize != blocksize) {
        brelse(bh);

        if (!sb_set_blocksize(sb, blocksize)) {
            printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
            goto failed_sbi;
        }

        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        if(!bh) {
            printk("EXT2-fs: Couldn't read superblock on "
                   "2nd try.\n");
            goto failed_sbi;
        }
        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
        sbi->s_es = es;
        if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
            printk ("EXT2-fs: Magic mismatch, very weird !\n");
            goto failed_mount;
        }
    }

    sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
        sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
        sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
    } else {
        sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
        sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
        if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
                (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
                (sbi->s_inode_size > blocksize)) {
            printk ("EXT2-fs: unsupported inode size: %d\n",
                    sbi->s_inode_size);
            goto failed_mount;
        }
    }

    sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
                       le32_to_cpu(es->s_log_frag_size);
    if (sbi->s_frag_size == 0)
        goto cantfind_ext2;
    sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

    sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
    sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
    sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

    if (EXT2_INODE_SIZE(sb) == 0)
        goto cantfind_ext2;
    sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
    if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
        goto cantfind_ext2;
    sbi->s_itb_per_group = sbi->s_inodes_per_group /
                           sbi->s_inodes_per_block;
    sbi->s_desc_per_block = sb->s_blocksize /
                            sizeof (struct ext2_group_desc);
    sbi->s_sbh = bh;
    sbi->s_mount_state = le16_to_cpu(es->s_state);
    sbi->s_addr_per_block_bits =
        ilog2 (EXT2_ADDR_PER_BLOCK(sb));
    sbi->s_desc_per_block_bits =
        ilog2 (EXT2_DESC_PER_BLOCK(sb));

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    if (sb->s_blocksize != bh->b_size) {
        if (!silent)
            printk ("VFS: Unsupported blocksize on dev "
                    "%s.\n", sb->s_id);
        goto failed_mount;
    }

    if (sb->s_blocksize != sbi->s_frag_size) {
        printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
                sbi->s_frag_size, sb->s_blocksize);
        goto failed_mount;
    }

    if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #blocks per group too big: %lu\n",
                sbi->s_blocks_per_group);
        goto failed_mount;
    }
    if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #fragments per group too big: %lu\n",
                sbi->s_frags_per_group);
        goto failed_mount;
    }
    if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
        printk ("EXT2-fs: #inodes per group too big: %lu\n",
                sbi->s_inodes_per_group);
        goto failed_mount;
    }

    if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
        goto cantfind_ext2;
    sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                            le32_to_cpu(es->s_first_data_block) - 1)
                           / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
    db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
               EXT2_DESC_PER_BLOCK(sb);
    sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
    if (sbi->s_group_desc == NULL) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount;
    }
    bgl_lock_init(&sbi->s_blockgroup_lock);
    sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
                           GFP_KERNEL);
    if (!sbi->s_debts) {
        printk ("EXT2-fs: not enough memory\n");
        goto failed_mount_group_desc;
    }
    memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
    for (i = 0; i < db_count; i++) {
        block = descriptor_loc(sb, logic_sb_block, i);
        sbi->s_group_desc[i] = sb_bread(sb, block);
        if (!sbi->s_group_desc[i]) {
            for (j = 0; j < i; j++)
                brelse (sbi->s_group_desc[j]);
            printk ("EXT2-fs: unable to read group descriptors\n");
            goto failed_mount_group_desc;
        }
    }
    if (!ext2_check_descriptors (sb)) {
        printk ("EXT2-fs: group descriptors corrupted!\n");
        goto failed_mount2;
    }
    sbi->s_gdb_count = db_count;
    get_random_bytes(&sbi->s_next_generation, sizeof(u32));
    spin_lock_init(&sbi->s_next_gen_lock);

    percpu_counter_init(&sbi->s_freeblocks_counter,
                        ext2_count_free_blocks(sb));
    percpu_counter_init(&sbi->s_freeinodes_counter,
                        ext2_count_free_inodes(sb));
    percpu_counter_init(&sbi->s_dirs_counter,
                        ext2_count_dirs(sb));
    /*
     * set up enough so that it can read an inode
     */
    sb->s_op = &ext2_sops;
    sb->s_export_op = &ext2_export_ops;
    sb->s_xattr = ext2_xattr_handlers;
    root = iget(sb, EXT2_ROOT_INO);
    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        printk(KERN_ERR "EXT2-fs: get root inode failed\n");
        goto failed_mount3;
    }
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
        dput(sb->s_root);
        sb->s_root = NULL;
        printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
        goto failed_mount3;
    }
    if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
        ext2_warning(sb, __FUNCTION__,
                     "mounting ext3 filesystem as ext2");
    ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
    return 0;

cantfind_ext2:
    if (!silent)
        printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
               sb->s_id);
    goto failed_mount;
failed_mount3:
    percpu_counter_destroy(&sbi->s_freeblocks_counter);
    percpu_counter_destroy(&sbi->s_freeinodes_counter);
    percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
    for (i = 0; i < db_count; i++)
        brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
    kfree(sbi->s_group_desc);
    kfree(sbi->s_debts);
failed_mount:
    brelse(bh);
failed_sbi:
    sb->s_fs_info = NULL;
    kfree(sbi);
    return -EINVAL;
}
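
A worked example for the two shift counts cached above, at an assumed 1 KiB block size: block numbers are 4 bytes, so EXT2_ADDR_PER_BLOCK = 1024 / 4 = 256 and s_addr_per_block_bits = ilog2(256) = 8; group descriptors are 32 bytes, so EXT2_DESC_PER_BLOCK = 32 and s_desc_per_block_bits = 5. A standalone check:

#include <stdio.h>

static unsigned ilog2_u(unsigned x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned blocksize = 1024;                  /* assumed 1 KiB blocks */
	unsigned addr_per_block = blocksize / 4;    /* 4-byte block numbers */
	unsigned desc_per_block = blocksize / 32;   /* 32-byte group descriptors */
	printf("addr bits = %u, desc bits = %u\n",
	       ilog2_u(addr_per_block), ilog2_u(desc_per_block));   /* 8, 5 */
	return 0;
}
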
Example #12
0
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
	    (cmd->opcode == SD_IO_RW_EXTENDED))
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}
Example #13
0
static int sta350_probe_dt(struct device *dev, struct sta350_priv *sta350)
{
	struct device_node *np = dev->of_node;
	struct sta350_platform_data *pdata;
	const char *ffx_power_mode;
	u16 tmp;
	u8 tmp8;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_property_read_u8(np, "st,output-conf",
			    &pdata->output_conf);
	of_property_read_u8(np, "st,ch1-output-mapping",
			    &pdata->ch1_output_mapping);
	of_property_read_u8(np, "st,ch2-output-mapping",
			    &pdata->ch2_output_mapping);
	of_property_read_u8(np, "st,ch3-output-mapping",
			    &pdata->ch3_output_mapping);

	if (of_get_property(np, "st,thermal-warning-recovery", NULL))
		pdata->thermal_warning_recovery = 1;
	if (of_get_property(np, "st,thermal-warning-adjustment", NULL))
		pdata->thermal_warning_adjustment = 1;
	if (of_get_property(np, "st,fault-detect-recovery", NULL))
		pdata->fault_detect_recovery = 1;

	pdata->ffx_power_output_mode = STA350_FFX_PM_VARIABLE_DROP_COMP;
	if (!of_property_read_string(np, "st,ffx-power-output-mode",
				     &ffx_power_mode)) {
		int i, mode = -EINVAL;

		for (i = 0; i < ARRAY_SIZE(sta350_ffx_modes); i++)
			if (!strcasecmp(ffx_power_mode, sta350_ffx_modes[i]))
				mode = i;

		if (mode < 0)
			dev_warn(dev, "Unsupported ffx output mode: %s\n",
				 ffx_power_mode);
		else
			pdata->ffx_power_output_mode = mode;
	}

	tmp = 140;
	of_property_read_u16(np, "st,drop-compensation-ns", &tmp);
	pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20;

	if (of_get_property(np, "st,overcurrent-warning-adjustment", NULL))
		pdata->oc_warning_adjustment = 1;

	/* CONFE */
	if (of_get_property(np, "st,max-power-use-mpcc", NULL))
		pdata->max_power_use_mpcc = 1;

	if (of_get_property(np, "st,max-power-correction", NULL))
		pdata->max_power_correction = 1;

	if (of_get_property(np, "st,am-reduction-mode", NULL))
		pdata->am_reduction_mode = 1;

	if (of_get_property(np, "st,odd-pwm-speed-mode", NULL))
		pdata->odd_pwm_speed_mode = 1;

	if (of_get_property(np, "st,distortion-compensation", NULL))
		pdata->distortion_compensation = 1;

	/* CONFF */
	if (of_get_property(np, "st,invalid-input-detect-mute", NULL))
		pdata->invalid_input_detect_mute = 1;

	/* MISC */
	if (of_get_property(np, "st,activate-mute-output", NULL))
		pdata->activate_mute_output = 1;

	if (of_get_property(np, "st,bridge-immediate-off", NULL))
		pdata->bridge_immediate_off = 1;

	if (of_get_property(np, "st,noise-shape-dc-cut", NULL))
		pdata->noise_shape_dc_cut = 1;

	if (of_get_property(np, "st,powerdown-master-volume", NULL))
		pdata->powerdown_master_vol = 1;

	if (!of_property_read_u8(np, "st,powerdown-delay-divider", &tmp8)) {
		if (is_power_of_2(tmp8) && tmp8 >= 1 && tmp8 <= 128)
			pdata->powerdown_delay_divider = ilog2(tmp8);
		else
			dev_warn(dev, "Unsupported powerdown delay divider %d\n",
				 tmp8);
	}

	sta350->pdata = pdata;

	return 0;
}
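
The divider encoding above, in isolation: a power-of-two divider in 1..128 is stored as its exponent, so 1 -> 0, 2 -> 1, ..., 128 -> 7, and anything else is rejected. A standalone sketch with local helpers in place of the kernel's is_power_of_2()/ilog2():

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool is_pow2(uint8_t x) { return x && !(x & (x - 1)); }

static unsigned ilog2_u(uint8_t x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint8_t dividers[] = { 1, 2, 16, 100, 128 };
	for (int i = 0; i < 5; i++) {
		if (is_pow2(dividers[i]))
			printf("divider %3u -> field %u\n", dividers[i], ilog2_u(dividers[i]));
		else
			printf("divider %3u -> rejected\n", dividers[i]);  /* 100 */
	}
	return 0;
}
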
Example #14
0
/*
 * init_winctx_regs()
 *	Initialize window context registers for a receive window.
 *	Except for caching control and marking window open, the registers
 *	are initialized in the order listed in Section 3.1.4 (Window Context
 *	Cache Register Details) of the VAS workbook although they don't need
 *	to be.
 *
 * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL
 *	(so that it can get a large contiguous area) and passes that buffer
 *	to kernel via device tree. We now write that buffer address to the
 *	FIFO BAR. Would it make sense to do this all in OPAL? i.e have OPAL
 *	write the per-chip RX FIFO addresses to the windows during boot-up
 *	as a one-time task? That could work for NX but what about other
 *	receivers?  Let the receivers tell us the rx-fifo buffers for now.
 */
int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
{
	u64 val;
	int fifo_size;

	reset_window_regs(window);

	val = 0ULL;
	val = SET_FIELD(VAS_LPID, val, winctx->lpid);
	write_hvwc_reg(window, VREG(LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PID_ID, val, winctx->pidr);
	write_hvwc_reg(window, VREG(PID), val);

	init_xlate_regs(window, winctx->user_win);

	val = 0ULL;
	val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0);
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);

	/* In PowerNV, interrupts go to HV. */
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid);
	write_hvwc_reg(window, VREG(PSWID), val);

	write_hvwc_reg(window, VREG(SPARE1), 0ULL);
	write_hvwc_reg(window, VREG(SPARE2), 0ULL);
	write_hvwc_reg(window, VREG(SPARE3), 0ULL);

	/*
	 * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR
	 *	 register as is - do NOT shift the address into VAS_LFIFO_BAR
	 *	 bit fields! Ok to set the page migration select fields -
	 *	 VAS ignores the lower 10+ bits in the address anyway, because
	 *	 the minimum FIFO size is 1K?
	 *
	 * See also: Design note in function header.
	 */
	val = __pa(winctx->rx_fifo);
	val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
	write_hvwc_reg(window, VREG(LFIFO_BAR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type);
	val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val);

	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(LRX_WCRED), val);

	val = 0ULL;
	val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(TX_WCRED), val);

	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);

	fifo_size = winctx->rx_fifo_size / 1024;

	val = 0ULL;
	val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size));
	write_hvwc_reg(window, VREG(LFIFO_SIZE), val);

	/* Update window control and caching control registers last so
	 * we mark the window open only after fully initializing it and
	 * pushing context to cache.
	 */

	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);

	init_rsvd_tx_buf_count(window, winctx);

	/* for a send window, point to the matching receive window */
	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val);

	write_hvwc_reg(window, VREG(SPARE4), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable);
	val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable);
	val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early);
	val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg);
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope);
	val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val);

	/* Skip read-only registers NX_UTIL and NX_UTIL_SE */

	write_hvwc_reg(window, VREG(SPARE5), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(SPARE6), 0ULL);

	/* Finally, push window context to memory and... */
	val = 0ULL;
	val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);

	/* ... mark the window open for business */
	val = 0ULL;
	val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit);
	val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win);
	val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode);
	val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win);
	val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
	val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
	write_hvwc_reg(window, VREG(WINCTL), val);

	return 0;
}
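
The LFIFO_SIZE encoding above, taken alone: the receive FIFO size is written as log2 of its size in kilobytes, so an assumed 64 KB FIFO becomes ilog2(65536 / 1024) = 6. A minimal sketch:

#include <stdio.h>

static unsigned ilog2_u(unsigned x)   /* floor(log2(x)), x > 0 */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned rx_fifo_size = 64 * 1024;   /* assumed 64 KB FIFO */
	printf("LFIFO_SIZE field = %u\n", ilog2_u(rx_fifo_size / 1024));  /* 6 */
	return 0;
}
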
Example #15
0
static int kmap_order(pfn_t pages)
{ return MIN(ilog2(pages), KMAP_ORDERS - 1); }
Example #16
0
 * reclaimer will try to scan LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). Current value for the vmpressure_level_critical_prio is chosen
 * empirically, but the number, in essence, means that we consider
 * critical level when scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
    return container_of(work, struct vmpressure, work);
}

#ifdef CONFIG_MEMCG
static struct vmpressure *cg_to_vmpressure(struct cgroup *cg)
{
    return css_to_vmpressure(cgroup_subsys_state(cg, mem_cgroup_subsys_id));
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
    struct cgroup *cg = vmpressure_to_css(vmpr)->cgroup;
Example #17
0
static int _vds_shared_init(vorbis_dsp_state *v,vorbis_info *vi,int encp){
  int i;
  codec_setup_info *ci=vi->codec_setup;
  private_state *b=NULL;
  int hs;

  if(ci==NULL) return 1;
  hs=ci->halfrate_flag; 

  memset(v,0,sizeof(*v));
  b=v->backend_state=_ogg_calloc(1,sizeof(*b));

  v->vi=vi;
  b->modebits=ilog2(ci->modes);

  b->transform[0]=_ogg_calloc(VI_TRANSFORMB,sizeof(*b->transform[0]));
  b->transform[1]=_ogg_calloc(VI_TRANSFORMB,sizeof(*b->transform[1]));

  /* MDCT is tranform 0 */

  b->transform[0][0]=_ogg_calloc(1,sizeof(mdct_lookup));
  b->transform[1][0]=_ogg_calloc(1,sizeof(mdct_lookup));
  mdct_init(b->transform[0][0],ci->blocksizes[0]>>hs);
  mdct_init(b->transform[1][0],ci->blocksizes[1]>>hs);

  /* Vorbis I uses only window type 0 */
  b->window[0]=ilog2(ci->blocksizes[0])-6;
  b->window[1]=ilog2(ci->blocksizes[1])-6;

  if(encp){ /* encode/decode differ here */

    /* analysis always needs an fft */
    drft_init(&b->fft_look[0],ci->blocksizes[0]);
    drft_init(&b->fft_look[1],ci->blocksizes[1]);

    /* finish the codebooks */
    if(!ci->fullbooks){
      ci->fullbooks=_ogg_calloc(ci->books,sizeof(*ci->fullbooks));
      for(i=0;i<ci->books;i++)
	vorbis_book_init_encode(ci->fullbooks+i,ci->book_param[i]);
    }

    b->psy=_ogg_calloc(ci->psys,sizeof(*b->psy));
    for(i=0;i<ci->psys;i++){
      _vp_psy_init(b->psy+i,
		   ci->psy_param[i],
		   &ci->psy_g_param,
		   ci->blocksizes[ci->psy_param[i]->blockflag]/2,
		   vi->rate);
    }

    v->analysisp=1;
  }else{
    /* finish the codebooks */
    if(!ci->fullbooks){
      ci->fullbooks=_ogg_calloc(ci->books,sizeof(*ci->fullbooks));
      for(i=0;i<ci->books;i++){
	if(vorbis_book_init_decode(ci->fullbooks+i,ci->book_param[i]))
	  return -1;
	/* decode codebooks are now standalone after init */
	vorbis_staticbook_destroy(ci->book_param[i]);
	ci->book_param[i]=NULL;
      }
    }
  }

  /* initialize the storage vectors. blocksize[1] is small for encode,
     but the correct size for decode */
  v->pcm_storage=ci->blocksizes[1];
  v->pcm=_ogg_malloc(vi->channels*sizeof(*v->pcm));
  v->pcmret=_ogg_malloc(vi->channels*sizeof(*v->pcmret));
  {
    int i;
    for(i=0;i<vi->channels;i++)
      v->pcm[i]=_ogg_calloc(v->pcm_storage,sizeof(*v->pcm[i]));
  }

  /* all 1 (large block) or 0 (small block) */
  /* explicitly set for the sake of clarity */
  v->lW=0; /* previous window size */
  v->W=0;  /* current window size */

  /* all vector indexes */
  v->centerW=ci->blocksizes[1]/2;

  v->pcm_current=v->centerW;

  /* initialize all the backend lookups */
  b->flr=_ogg_calloc(ci->floors,sizeof(*b->flr));
  b->residue=_ogg_calloc(ci->residues,sizeof(*b->residue));

  for(i=0;i<ci->floors;i++)
    b->flr[i]=_floor_P[ci->floor_type[i]]->
      look(v,ci->floor_param[i]);

  for(i=0;i<ci->residues;i++)
    b->residue[i]=_residue_P[ci->residue_type[i]]->
      look(v,ci->residue_param[i]);    

  return 0;
}
Example #18
0
/*
 * WARNING: This function isn't finished and has never been tested!!!!
 */
GLint gluBuild1DMipmaps( GLenum target, GLint components,
                         GLint width, GLenum format,
                         GLenum type, const void *data )
{
   GLubyte *texture;
   GLint levels, max_levels;
   GLint new_width, max_width;
   GLint i, j, k, l;

   glGetIntegerv( GL_MAX_TEXTURE_SIZE, &max_width );
   max_levels = ilog2( max_width ) + 1;

   /* Compute how many mipmap images to make */
   levels = ilog2( width ) + 1;
   if (levels>max_levels) {
      levels = max_levels;
   }

   new_width = 1 << (levels-1);

   texture = (GLubyte *) malloc( new_width * components );
   if (!texture) {
      return GLU_OUT_OF_MEMORY;
   }

   if (width != new_width) {
      /* initial rescaling */
      switch (type) {
	 case GL_UNSIGNED_BYTE:
	    {
	       GLubyte *ub_data = (GLubyte *) data;
	       for (i=0;i<new_width;i++) {
		  j = i * width / new_width;
		  for (k=0;k<components;k++) {
		     texture[i*components+k] = ub_data[j*components+k];
		  }
	       }
	    }
	    break;
	 default:
	    /* Not implemented */
	    return GLU_ERROR;
      }
   }

   /* generate and load mipmap images */
   for (l=0;l<levels;l++) {
      glTexImage1D( GL_TEXTURE_1D, l, components, new_width, 0,
		    format, GL_UNSIGNED_BYTE, texture );

      /* Scale image down to 1/2 size */
      new_width = new_width / 2;
      for (i=0;i<new_width;i++) {
	 for (k=0;k<components;k++) {
	    GLint sample1, sample2;
	    sample1 = (GLint) texture[i*2*components+k];
	    sample2 = (GLint) texture[(i*2+1)*components+k];
	    texture[i*components+k] = (GLubyte) ((sample1 + sample2) / 2);
	 }
      }
   }

   free( texture );

   /* make sure remaining mipmap levels are removed */
   for (l=levels;l<max_levels;l++) {
      glTexImage1D( GL_TEXTURE_1D, l, components, 0, 0,
		    format, GL_UNSIGNED_BYTE, NULL );
   }

   return 0;
}
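
A worked number for the level count above: for a power-of-two width w, ilog2(w) + 1 mip levels cover w, w/2, ..., 1, so a 256-texel texture gets 9 levels. A standalone check with a local helper in place of Mesa's ilog2():

#include <stdio.h>

static int ilog2_i(int x)   /* floor(log2(x)), x > 0 */
{
	int n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	for (int w = 1; w <= 256; w *= 4)
		printf("width %3d -> %d mip levels\n", w, ilog2_i(w) + 1);
	/* 1 -> 1, 4 -> 3, 16 -> 5, 64 -> 7, 256 -> 9 */
	return 0;
}
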
Example #19
0
/*!
 * \brief Used to set the MFS drive parameters for a unit.
 *
 * This function assumes that the boot sector of the drive is stored in
 * the drive's sector buffer.  This function is called after MFS is
 * initialized, or after the drive has been formatted.
 *
 * NOTE: It is assumed that the drive is locked by the calling function.
 *
 * \param drive_ptr
 *
 * \return uint32_t Error code.
 */
uint32_t MFS_Mount_drive_internal(
    MFS_DRIVE_STRUCT_PTR drive_ptr)
{
    BIOS_PARAM_STRUCT_DISK_PTR bpb_ptr;
    BIOS_PARAM32_STRUCT_DISK_PTR bpb32_ptr;
    FILESYSTEM_INFO_DISK_PTR fsinfo_ptr;

    uint32_t reserved_sectors;
    uint32_t root_dir_sectors;
    uint32_t data_sectors;
    uint32_t cluster_count;

    uint32_t bpb_sector_size;
    uint32_t bpb_sector_mult;

    int error_code;
    int result = MFS_NO_ERROR;

    uint8_t *boot_sector;

    drive_ptr->DOS_DISK = false;

    error_code = MFS_sector_cache_invalidate(drive_ptr, 0, 0);
    if (error_code != MFS_NO_ERROR)
    {
        return error_code;
    }

    error_code = MFS_sector_map(drive_ptr, BOOT_SECTOR, (void **)&boot_sector, MFS_MAP_MODE_READONLY, 0);
    if (error_code != MFS_NO_ERROR)
    {
        return error_code;
    }

    /*
    ** Extract the drive parameters (BIOS Parameter Block) from the BOOT Record.
    */
    bpb_ptr = (BIOS_PARAM_STRUCT_DISK_PTR)boot_sector;
    bpb32_ptr = (BIOS_PARAM32_STRUCT_DISK_PTR)(boot_sector + sizeof(BIOS_PARAM_STRUCT_DISK));

    /*
    ** Next, check  to see that the BOOT record is that of a DOS disk.  If  not,
    ** the drive will have to be formatted by the upper layer before the drive
    ** can be 'mounted'.
    */
    if ((boot_sector[0] != MFS_DOS30_JMP) && (boot_sector[0] != MFS_DOS30_B))
    {
        result = MFS_NOT_A_DOS_DISK;
    }

    if (result == MFS_NO_ERROR)
    {
        /*
        ** Always use storage device sector size.
        ** If BPB sector size is larger, then recalculate other parameters accordingly.
        ** In any case, BPB sector size has to be multiple of device sector size, the code explicitly checks this.
        */
        bpb_sector_size = mqx_dtohs(bpb_ptr->SECTOR_SIZE);
        if (bpb_sector_size % drive_ptr->SECTOR_SIZE)
        {
            result = MFS_NOT_A_DOS_DISK;
        }
    }

    if (result == MFS_NO_ERROR)
    {
        /* Sector values from BPB are to be multiplied by this factor */
        bpb_sector_mult = bpb_sector_size / drive_ptr->SECTOR_SIZE;

        reserved_sectors = mqx_dtohs(bpb_ptr->RESERVED_SECTORS) * bpb_sector_mult;

        drive_ptr->SECTORS_PER_CLUSTER = mqx_dtohc(bpb_ptr->SECTORS_PER_CLUSTER) * bpb_sector_mult;
        drive_ptr->CLUSTER_POWER_SECTORS = ilog2(drive_ptr->SECTORS_PER_CLUSTER);
        drive_ptr->CLUSTER_POWER_BYTES = drive_ptr->SECTOR_POWER + drive_ptr->CLUSTER_POWER_SECTORS;
        drive_ptr->CLUSTER_SIZE_BYTES = drive_ptr->SECTOR_SIZE * drive_ptr->SECTORS_PER_CLUSTER;

        drive_ptr->NUMBER_OF_FAT = mqx_dtohc(bpb_ptr->NUMBER_OF_FAT);
        drive_ptr->ROOT_ENTRIES = mqx_dtohs(bpb_ptr->ROOT_ENTRIES);

        drive_ptr->SECTORS_PER_FAT = mqx_dtohs(bpb_ptr->SECTORS_PER_FAT);
        if (drive_ptr->SECTORS_PER_FAT == 0)
        {
            drive_ptr->SECTORS_PER_FAT = mqx_dtohl(bpb32_ptr->FAT_SIZE);
        }
        drive_ptr->SECTORS_PER_FAT *= bpb_sector_mult;

        drive_ptr->MEGA_SECTORS = mqx_dtohs(bpb_ptr->NUMBER_SECTORS);
        if (drive_ptr->MEGA_SECTORS == 0)
        {
            drive_ptr->MEGA_SECTORS = mqx_dtohl(bpb_ptr->MEGA_SECTORS);
        }
        drive_ptr->MEGA_SECTORS *= bpb_sector_mult;

        /* Determine FAT type by calculating the count of clusters on disk */
        drive_ptr->ENTRIES_PER_SECTOR = drive_ptr->SECTOR_SIZE / sizeof(DIR_ENTRY_DISK);
        root_dir_sectors = drive_ptr->ROOT_ENTRIES / drive_ptr->ENTRIES_PER_SECTOR;

        data_sectors = drive_ptr->MEGA_SECTORS - reserved_sectors - root_dir_sectors - (drive_ptr->NUMBER_OF_FAT * drive_ptr->SECTORS_PER_FAT);
        cluster_count = data_sectors / drive_ptr->SECTORS_PER_CLUSTER;

        /* Now we have cluster count, so we can determine FAT type */
        if (cluster_count < 4085)
        {
            drive_ptr->FAT_TYPE = MFS_FAT12;
        }
        else if (cluster_count < 65525)
        {
            drive_ptr->FAT_TYPE = MFS_FAT16;
        }
        else
        {
            drive_ptr->FAT_TYPE = MFS_FAT32;
        }

        drive_ptr->CLUSTER_SIZE_BYTES = drive_ptr->SECTOR_SIZE * drive_ptr->SECTORS_PER_CLUSTER;
        drive_ptr->CLUSTER_POWER_BYTES = drive_ptr->SECTOR_POWER + drive_ptr->CLUSTER_POWER_SECTORS;

        drive_ptr->FREE_COUNT = FSI_UNKNOWN; /* This is the unknown value */
        drive_ptr->NEXT_FREE_CLUSTER = FSI_UNKNOWN; /* MFS will calculate it later */

        drive_ptr->FAT_START_SECTOR = reserved_sectors;
        drive_ptr->DATA_START_SECTOR = drive_ptr->FAT_START_SECTOR + (drive_ptr->SECTORS_PER_FAT * drive_ptr->NUMBER_OF_FAT) + root_dir_sectors;

        if (drive_ptr->FAT_TYPE != MFS_FAT32)
        {
            /* FAT12 or FAT16 */
            drive_ptr->ROOT_START_SECTOR = drive_ptr->FAT_START_SECTOR + (drive_ptr->SECTORS_PER_FAT * drive_ptr->NUMBER_OF_FAT);
            drive_ptr->ROOT_CLUSTER = 0;
            MFS_chain_forge(drive_ptr, &drive_ptr->ROOT_CHAIN, drive_ptr->ROOT_START_SECTOR, root_dir_sectors);
        }
        else if (mqx_dtohs(bpb32_ptr->FS_VER) > MFS_FAT32_VER)
        {
            /* Unsupported FAT32 level */
            result = MFS_ERROR_UNKNOWN_FS_VERSION;
        }
        else
        {
            /* Supported FAT32 */
            drive_ptr->ROOT_CLUSTER = mqx_dtohl(bpb32_ptr->ROOT_CLUSTER);
            drive_ptr->ROOT_START_SECTOR = 0;
            MFS_chain_init(drive_ptr, &drive_ptr->ROOT_CHAIN, drive_ptr->ROOT_CLUSTER);

            drive_ptr->FS_INFO = mqx_dtohs(bpb32_ptr->FS_INFO);
        }
    }

    error_code = MFS_sector_unmap(drive_ptr, BOOT_SECTOR, 0);
    if (result == MFS_NO_ERROR)
    {
        result = error_code;
    }

    if (result != MFS_NO_ERROR)
    {
        return result;
    }

    if (drive_ptr->FAT_TYPE == MFS_FAT32)
    {

        /*
        ** Reset the FSInfo->Free_Count and the FSInfo->Next_Free to
        ** unknown (0xFFFFFFFF). MFS uses its own internal version of these
        ** fields. If Windows uses the same disk, it will recalculate the
        ** correct fields the first time it mounts the drive.
        */

        error_code = MFS_sector_map(drive_ptr, drive_ptr->FS_INFO, (void **)&fsinfo_ptr, MFS_is_read_only(drive_ptr) ? MFS_MAP_MODE_READONLY : MFS_MAP_MODE_MODIFY, 0);
        if (error_code == MFS_NO_ERROR)
        {

            if ((mqx_dtohl(fsinfo_ptr->LEAD_SIG) == FSI_LEADSIG) && (mqx_dtohl(fsinfo_ptr->STRUCT_SIG) == FSI_STRUCTSIG) &&
                (mqx_dtohl(fsinfo_ptr->TRAIL_SIG) == FSI_TRAILSIG))
            {
                drive_ptr->FREE_COUNT = mqx_dtohl(fsinfo_ptr->FREE_COUNT);
                drive_ptr->NEXT_FREE_CLUSTER = mqx_dtohl(fsinfo_ptr->NEXT_FREE);
            }

            if (!MFS_is_read_only(drive_ptr))
            {
                mqx_htodl(fsinfo_ptr->LEAD_SIG, FSI_LEADSIG);
                mqx_htodl(fsinfo_ptr->STRUCT_SIG, FSI_STRUCTSIG);
                mqx_htodl(fsinfo_ptr->FREE_COUNT, FSI_UNKNOWN); /* compute it */
                mqx_htodl(fsinfo_ptr->NEXT_FREE, FSI_UNKNOWN); /* compute it */
                mqx_htodl(fsinfo_ptr->TRAIL_SIG, FSI_TRAILSIG);
            }

            error_code = MFS_sector_unmap(drive_ptr, drive_ptr->FS_INFO, !MFS_is_read_only(drive_ptr));
        }
        if (result == MFS_NO_ERROR)
        {
            result = error_code;
        }
    }

    drive_ptr->LAST_CLUSTER = (drive_ptr->MEGA_SECTORS - drive_ptr->DATA_START_SECTOR) / drive_ptr->SECTORS_PER_CLUSTER + 1;

    drive_ptr->CURRENT_DIR[0] = '\\'; /* Root dir */
    drive_ptr->CURRENT_DIR[1] = '\0';
    drive_ptr->CUR_DIR_CLUSTER = drive_ptr->ROOT_CLUSTER;
    drive_ptr->CUR_DIR_CHAIN_PTR = &drive_ptr->ROOT_CHAIN;

    if (result == MFS_NO_ERROR)
    {
        drive_ptr->DOS_DISK = true;
    }

    return result;
}
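
The FAT-type decision above comes straight from the Microsoft FAT specification: the type is determined solely by the count of data clusters, using the thresholds 4085 and 65525. Below is a minimal standalone sketch of that rule; fat_type_of() is a hypothetical helper for illustration, not part of the MFS sources.

/*
 * Minimal sketch of the FAT-type rule used in the mount code above:
 * the type depends only on the number of data clusters.
 */
enum fat_type { FAT12, FAT16, FAT32 };

static enum fat_type fat_type_of(unsigned long cluster_count)
{
    if (cluster_count < 4085UL)        /* up to 4084 clusters  -> FAT12 */
        return FAT12;
    else if (cluster_count < 65525UL)  /* up to 65524 clusters -> FAT16 */
        return FAT16;
    else                               /* everything larger    -> FAT32 */
        return FAT32;
}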
Example #20
int
zfs_domount(struct super_block *sb, void *data, int silent)
{
	zpl_mount_data_t *zmd = data;
	const char *osname = zmd->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);

#ifdef HAVE_BDI
	/*
	 * 2.6.32 API change,
	 * Added backing_device_info (BDI) per super block interfaces.  A BDI
	 * must be configured when using a non-device backed filesystem for
	 * proper writeback.  This is not required for older pdflush kernels.
	 *
	 * NOTE: Linux read-ahead is disabled in favor of zfs read-ahead.
	 */
	zsb->z_bdi.ra_pages = 0;
	sb->s_bdi = &zsb->z_bdi;

	error = -bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
	if (error)
		goto out;
#endif /* HAVE_BDI */

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname, "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	} else {
		error = zfs_sb_setup(zsb, B_TRUE);
	}
	/* Do not proceed to root-inode allocation on a setup failure */
	if (error)
		goto out;

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = ENOMEM;
		goto out;
	}

	if (!zsb->z_issnap)
		zfsctl_create(zsb);
out:
	if (error) {
		dmu_objset_disown(zsb->z_os, zsb);
		zfs_sb_free(zsb);
	}

	return (error);
}
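
One detail worth noting in zfs_domount(): sb->s_blocksize_bits is derived with ilog2(), which returns floor(log2(n)). Because the ZFS recordsize property is constrained to powers of two, the round-trip is exact. A small userspace sketch of that invariant follows; ilog2_u64() is a hypothetical stand-in for the kernel's ilog2() macro.

/* Sketch of the invariant behind s_blocksize_bits = ilog2(recordsize). */
#include <assert.h>
#include <stdint.h>

static unsigned ilog2_u64(uint64_t v)
{
	unsigned bits = 0;

	while (v >>= 1)
		bits++;
	return bits;                           /* floor(log2(v)) for v >= 1 */
}

int main(void)
{
	uint64_t recordsize = 131072;          /* default 128 KiB recordsize */
	unsigned bits = ilog2_u64(recordsize); /* 17 */

	assert((1ULL << bits) == recordsize);  /* exact for powers of two */
	return 0;
}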
Example #21
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto failed;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		goto failed;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	spin_lock_init(&sbi->s_lock);

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.  
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options((char *) data, sb))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				    EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext2_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext2_msg(sb, KERN_ERR,	"error: couldn't mount because of "
		       "unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
		       "unsupported optional features (%x)",
		       le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			ext2_msg(sb, KERN_ERR,
				"error: unsupported blocksize for xip");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if (!bh) {
			ext2_msg(sb, KERN_ERR, "error: couldn't read "
				"superblock on 2nd try");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			ext2_msg(sb, KERN_ERR, "error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
	sb->s_max_links = EXT2_LINK_MAX;

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		ext2_msg(sb, KERN_ERR,
			"error: fragsize %lu != blocksize %lu"
			"(not supported yet)",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 				le32_to_cpu(es->s_first_data_block) - 1)
 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount_group_desc;
	}
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			ext2_msg(sb, KERN_ERR,
				"error: unable to read group descriptors");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb), GFP_KERNEL);
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb), GFP_KERNEL);
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb), GFP_KERNEL);
	}
	if (err) {
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext2_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting ext3 filesystem as ext2");
	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;
	ext2_write_super(sb);
	return 0;

cantfind_ext2:
	if (!silent)
		ext2_msg(sb, KERN_ERR,
			"error: can't find an ext2 filesystem on dev %s.",
			sb->s_id);
	goto failed_mount;
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
failed:
	return ret;
}
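
The superblock-location arithmetic near the top of ext2_fill_super() converts the fixed byte offset sb_block * 1024 into a (device block, offset-within-block) pair whenever the device block size differs from 1 KiB. A worked sketch of that math, assuming a 4 KiB device block size:

/* Worked sketch of the ext2 superblock-location math. */
#include <stdio.h>

#define EXT2_BLOCK_SIZE 1024UL   /* BLOCK_SIZE in the kernel sources */

int main(void)
{
	unsigned long sb_block = 1;        /* default superblock location */
	unsigned long blocksize = 4096;    /* assumed 4 KiB-sector device */

	unsigned long logic_sb_block = (sb_block * EXT2_BLOCK_SIZE) / blocksize;
	unsigned long offset = (sb_block * EXT2_BLOCK_SIZE) % blocksize;

	/* Prints: superblock in device block 0 at offset 1024 */
	printf("superblock in device block %lu at offset %lu\n",
	       logic_sb_block, offset);
	return 0;
}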
Example #22
/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
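
In other words, hit counts are bucketed logarithmically: a count of 1 maps to level 0, 2-3 to level 1, 4-7 to level 2, and so on, clamped to the top level. A userspace sketch of the same mapping; NR_QUEUE_LEVELS = 16 is an assumed value for illustration.

/* Userspace sketch of the logarithmic bucketing done by queue_level(). */
#include <stdio.h>

#define NR_QUEUE_LEVELS 16u

static unsigned ilog2_u32(unsigned v)
{
	unsigned bits = 0;

	while (v >>= 1)
		bits++;
	return bits;
}

static unsigned queue_level_of(unsigned hit_count)
{
	unsigned lvl = ilog2_u32(hit_count);

	return lvl < NR_QUEUE_LEVELS - 1u ? lvl : NR_QUEUE_LEVELS - 1u;
}

int main(void)
{
	/* hit_count 1 -> 0, 2..3 -> 1, 4..7 -> 2, 8..15 -> 3, ... */
	for (unsigned hc = 1; hc <= 16; hc <<= 1)
		printf("hit_count %2u -> level %u\n", hc, queue_level_of(hc));
	return 0;
}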
Example #23
static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
{
	struct input_dev *pwr;
	int key_release_irq = platform_get_irq(pdev, 0);
	int key_press_irq = platform_get_irq(pdev, 1);
	int err;
	unsigned int delay;
	u8 pon_cntl;
	struct pmic8xxx_pwrkey *pwrkey;
	const struct pm8xxx_pwrkey_platform_data *pdata =
					dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "power key platform data not supplied\n");
		return -EINVAL;
	}

	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
	if (pdata->kpd_trigger_delay_us > USEC_PER_SEC * 2 ||
		pdata->kpd_trigger_delay_us < USEC_PER_SEC / 64) {
		dev_err(&pdev->dev, "invalid power key trigger delay\n");
		return -EINVAL;
	}

	pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL);
	if (!pwrkey)
		return -ENOMEM;

	pwrkey->pdata = pdata;

	pwr = input_allocate_device();
	if (!pwr) {
		dev_dbg(&pdev->dev, "Can't allocate power button\n");
		err = -ENOMEM;
		goto free_pwrkey;
	}

	input_set_capability(pwr, EV_KEY, KEY_POWER);

	pwr->name = "pmic8xxx_pwrkey";
	pwr->phys = "pmic8xxx_pwrkey/input0";
	pwr->dev.parent = &pdev->dev;

	delay = (pdata->kpd_trigger_delay_us << 6) / USEC_PER_SEC;
	delay = ilog2(delay);

	err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl);
	if (err < 0) {
		dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
		goto free_input_dev;
	}

	pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
	pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK);
	if (pdata->pull_up)
		pon_cntl |= PON_CNTL_PULL_UP;
	else
		pon_cntl &= ~PON_CNTL_PULL_UP;

	err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl);
	if (err < 0) {
		dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
		goto free_input_dev;
	}

	err = input_register_device(pwr);
	if (err) {
		dev_dbg(&pdev->dev, "Can't register power key: %d\n", err);
		goto free_input_dev;
	}

	pwrkey->key_press_irq = key_press_irq;
	pwrkey->key_release_irq = key_release_irq;
	pwrkey->pwr = pwr;

	platform_set_drvdata(pdev, pwrkey);

	/* check power key status during boot */
	err = pm8xxx_read_irq_stat(pdev->dev.parent, key_press_irq);
	if (err < 0) {
		dev_err(&pdev->dev, "reading irq status failed\n");
		goto unreg_input_dev;
	}
	pwrkey->press = !!err;

	if (pwrkey->press) {
		input_report_key(pwrkey->pwr, KEY_POWER, 1);
		input_sync(pwrkey->pwr);
	}

#ifdef CONFIG_TOUCHSCREEN_SWEEP2WAKE
	sweep2wake_setdev(pwr);
	printk(KERN_INFO "[sweep2wake]: set device %s\n", pwr->name);
#endif

	err = request_any_context_irq(key_press_irq, pwrkey_press_irq,
		IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey);
	if (err < 0) {
		dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
				 key_press_irq, err);
		goto unreg_input_dev;
	}

	err = request_any_context_irq(key_release_irq, pwrkey_release_irq,
		 IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey);
	if (err < 0) {
		dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
				 key_release_irq, err);

		goto free_press_irq;
	}

	device_init_wakeup(&pdev->dev, pdata->wakeup);

	return 0;

free_press_irq:
	free_irq(key_press_irq, pwrkey); /* must match the dev_id used at request time */
unreg_input_dev:
	platform_set_drvdata(pdev, NULL);
	input_unregister_device(pwr);
	pwr = NULL;
free_input_dev:
	input_free_device(pwr);
free_pwrkey:
	kfree(pwrkey);
	return err;
}
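
The delay encoding in this probe is worth spelling out: (kpd_trigger_delay_us << 6) / USEC_PER_SEC expresses the delay in 1/64-second units, and the log2 of that count is what goes into the PON_CNTL_1 delay field, so the documented 1/64 s to 2 s range maps to register values 0 through 7. A worked sketch under those assumptions, with ilog2_ul() standing in for the kernel's ilog2():

/* Worked sketch of the pwrkey trigger-delay encoding. */
#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned ilog2_ul(unsigned long v)
{
	unsigned bits = 0;

	while (v >>= 1)
		bits++;
	return bits;
}

int main(void)
{
	unsigned long kpd_trigger_delay_us = USEC_PER_SEC;    /* 1 second */
	unsigned long units = (kpd_trigger_delay_us << 6) / USEC_PER_SEC; /* 64 */
	unsigned reg = ilog2_ul(units);                       /* 6 */

	printf("delay %lu us -> %lu units of 1/64 s -> register value %u\n",
	       kpd_trigger_delay_us, units, reg);
	return 0;
}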