STATIC void
xfs_filestreams_trace(
	xfs_mount_t	*mp,	/* mount point */
	int		type,	/* type of trace */
	const char	*func,	/* source function */
	int		line,	/* source line number */
	__psunsigned_t	arg0,
	__psunsigned_t	arg1,
	__psunsigned_t	arg2,
	__psunsigned_t	arg3,
	__psunsigned_t	arg4,
	__psunsigned_t	arg5)
{
	ktrace_enter(xfs_filestreams_trace_buf,
		(void *)(__psint_t)(type | (line << 16)),
		(void *)func,
		(void *)(__psunsigned_t)current_pid(),
		(void *)mp,
		(void *)(__psunsigned_t)arg0,
		(void *)(__psunsigned_t)arg1,
		(void *)(__psunsigned_t)arg2,
		(void *)(__psunsigned_t)arg3,
		(void *)(__psunsigned_t)arg4,
		(void *)(__psunsigned_t)arg5,
		NULL, NULL, NULL, NULL, NULL, NULL);
}
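
A note on slot 0 above: it packs the trace type and the source line into one
pointer-sized word, type in the low 16 bits and line above it. A minimal
decoder might look like this (a sketch; the helper name is ours, not from the
source):

/* Sketch: unpack slot 0 of a filestreams trace entry, which was stored
 * as (type | (line << 16)). */
static inline void
xfs_filestreams_trace_decode(void *slot0, int *type, int *line)
{
	unsigned long	v = (unsigned long)slot0;

	*type = v & 0xffff;
	*line = v >> 16;
}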
Example #2
void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
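
Because ktrace slots are pointer-sized, each 64-bit value above is split into
32-bit high and low halves across two slots. A decoder would reverse the
split, roughly like this (hypothetical helper, assuming the hi/lo layout used
above):

/* Sketch: rebuild a 64-bit offset from the hi/lo halves stored above. */
static inline xfs_off_t
ktrace_slots_to_off(void *hi, void *lo)
{
	return ((xfs_off_t)(unsigned long)hi << 32) |
	       (xfs_off_t)((unsigned long)lo & 0xffffffff);
}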
Example #3
void
xfs_iomap_enter_trace(
	int		tag,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	ssize_t		count)
{
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)count),
		(void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
Example #4
void
xfs_rw_enter_trace(
	int			tag,
	xfs_iocore_t		*io,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
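
A typical call site passes a tag naming the I/O path being entered. Roughly
(an illustrative wrapper, not from the source; XFS_WRITE_ENTER is the tag the
write path historically used):

STATIC void
xfs_write_enter_trace(
	xfs_inode_t	*ip,
	void		*iovp,
	size_t		segs,
	loff_t		offset,
	int		ioflags)
{
	/* Trace entry into the write path before any real work happens. */
	xfs_rw_enter_trace(XFS_WRITE_ENTER, &ip->i_iocore,
			   iovp, segs, offset, ioflags);
}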
Example #5
void libcfs_debug_dumplog(void)
{
	wait_queue_t wait;
	struct task_struct *dumper;

	/* we're being careful to ensure that the kernel thread is
	 * able to set our state to running as it exits before we
	 * get to schedule() */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&debug_ctlwq, &wait);

	dumper = kthread_run(libcfs_debug_dumplog_thread,
			     (void *)(long)current_pid(),
			     "libcfs_debug_dumper");
	if (IS_ERR(dumper))
		pr_err("LustreError: cannot start log dump thread: %ld\n",
		       PTR_ERR(dumper));
	else
		schedule();

	/* be sure to teardown if kthread_run() failed */
	remove_wait_queue(&debug_ctlwq, &wait);
	set_current_state(TASK_RUNNING);
}
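
The wait above follows the classic prepare-to-wait pattern: the task marks
itself TASK_INTERRUPTIBLE and joins the queue before kthread_run(), so a
wake-up that arrives between thread creation and schedule() resets the state
to running instead of being lost. The same idea in miniature (a sketch with
hypothetical names my_wq and my_worker):

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static int my_worker(void *arg)
{
	/* ... do the work the caller is waiting for ... */
	wake_up(&my_wq);
	return 0;
}

static void wait_for_worker(void)
{
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);	/* 1: mark ourselves sleeping */
	add_wait_queue(&my_wq, &wait);		/* 2: become wakeable */

	if (!IS_ERR(kthread_run(my_worker, NULL, "my_worker")))
		schedule();			/* 3: an early wake-up is not lost */

	remove_wait_queue(&my_wq, &wait);
	set_current_state(TASK_RUNNING);
}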
Example #6
/* ARGSUSED */
void
__xfs_dqtrace_entry(
	xfs_dquot_t *dqp,
	char *func,
	void *retaddr,
	xfs_inode_t *ip)
{
	xfs_dquot_t *udqp = NULL;
	int ino = 0;

	ASSERT(dqp->q_trace);
	if (ip) {
		ino = ip->i_ino;
		udqp = ip->i_udquot;
	}
	ktrace_enter(dqp->q_trace,
		     (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
		     (void *)func,
		     (void *)(__psint_t)dqp->q_nrefs,
		     (void *)(__psint_t)dqp->dq_flags,
		     (void *)(__psint_t)dqp->q_res_bcount,
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_id, ARCH_CONVERT), /* 11 */
		     (void *)(__psint_t)current_pid(),
		     (void *)(__psint_t)ino,
		     (void *)(__psint_t)retaddr,
		     (void *)(__psint_t)udqp);
	return;
}
Example #8
/* ARGSUSED */
void
__xfs_dqtrace_entry(
	xfs_dquot_t	*dqp,
	char		*func,
	void		*retaddr,
	xfs_inode_t	*ip)
{
	xfs_dquot_t	*udqp = NULL;
	xfs_ino_t	ino = 0;

	ASSERT(dqp->q_trace);
	if (ip) {
		ino = ip->i_ino;
		udqp = ip->i_udquot;
	}
	ktrace_enter(dqp->q_trace,
		     (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
		     (void *)func,
		     (void *)(__psint_t)dqp->q_nrefs,
		     (void *)(__psint_t)dqp->dq_flags,
		     (void *)(__psint_t)dqp->q_res_bcount,
		     (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount),
		     (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount),
		     (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit),
		     (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit),
		     (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit),
		     (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit),
		     (void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id),
		     (void *)(__psint_t)current_pid(),
		     (void *)(__psint_t)ino,
		     (void *)(__psint_t)retaddr,
		     (void *)(__psint_t)udqp);
	return;
}
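
This helper is normally reached through thin wrapper macros that capture the
caller's return address; their shape is roughly the following (a sketch after
the pattern in xfs_dquot.h, written from memory rather than taken from this
listing):

#define xfs_dqtrace_entry_ino(a, b, ip) \
	__xfs_dqtrace_entry((a), (b), (void *)__return_address, (ip))
#define xfs_dqtrace_entry(a, b) \
	__xfs_dqtrace_entry((a), (b), (void *)__return_address, NULL)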
Example #9
/*
 * This is called to attempt to lock the inode associated with this
 * inode log item, in preparation for the push routine which does the actual
 * iflush.  Don't sleep on the inode lock or the flush lock.
 *
 * If the flush lock is already held, indicating that the inode has
 * been or is in the process of being flushed, then (ideally) we'd like to
 * see if the inode's buffer is still incore, and if so give it a nudge.
 * We delay doing so until the pushbuf routine, though, to avoid holding
 * the AIL lock across a call to the blackhole which is the buffer cache.
 * Also we don't want to sleep in any device strategy routines, which can happen
 * if we do the subsequent bawrite in here.
 */
STATIC uint
xfs_inode_item_trylock(
	xfs_inode_log_item_t	*iip)
{
	register xfs_inode_t	*ip;

	ip = iip->ili_inode;

	if (xfs_ipincount(ip) > 0) {
		return XFS_ITEM_PINNED;
	}

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		return XFS_ITEM_LOCKED;
	}

	if (!xfs_iflock_nowait(ip)) {
		/*
		 * If someone else isn't already trying to push the inode
		 * buffer, we get to do it.
		 */
		if (iip->ili_pushbuf_flag == 0) {
			iip->ili_pushbuf_flag = 1;
#ifdef DEBUG
			iip->ili_push_owner = current_pid();
#endif
			/*
			 * Inode is left locked in shared mode.
			 * Pushbuf routine gets to unlock it.
			 */
			return XFS_ITEM_PUSHBUF;
		} else {
			/*
			 * We hold the AIL lock, so we must specify the
			 * NONOTIFY flag so that we won't double trip.
			 */
			xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
			return XFS_ITEM_FLUSHING;
		}
		/* NOTREACHED */
	}

	/* Stale items should force out the iclog */
	if (ip->i_flags & XFS_ISTALE) {
		xfs_ifunlock(ip);
		xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
		return XFS_ITEM_PINNED;
	}

#ifdef DEBUG
	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ASSERT(iip->ili_format.ilf_fields != 0);
		ASSERT(iip->ili_logged == 0);
		ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL);
	}
#endif
	return XFS_ITEM_SUCCESS;
}
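
The four return codes form the contract with the AIL push loop: PINNED asks
for a log force, LOCKED and FLUSHING mean skip this item, and PUSHBUF means
we took ownership of ili_pushbuf_flag and must follow up in the pushbuf
routine. A minimal sketch of a consumer (hypothetical function, not from the
source):

STATIC void
xfs_ail_push_one(xfs_inode_log_item_t *iip)
{
	switch (xfs_inode_item_trylock(iip)) {
	case XFS_ITEM_SUCCESS:
		/* ilock and iflock held: flush the inode to its buffer */
		break;
	case XFS_ITEM_PUSHBUF:
		/* we own ili_pushbuf_flag: go nudge the backing buffer */
		break;
	case XFS_ITEM_PINNED:
		/* force the log so the pin count can drop */
		break;
	case XFS_ITEM_LOCKED:
	case XFS_ITEM_FLUSHING:
		/* someone else is working on it: skip this item */
		break;
	}
}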
Example #10
int record_syscall_start(int syscall)
{
	int max, index;

	max = sizeof(syscall_record)/sizeof(syscall_record[0]);
	index = next_syscall_index(max);

	syscall_record[index].syscall = syscall;
	syscall_record[index].pid = current_pid();
	syscall_record[index].result = 0xdeadbeef;
	gettimeofday(&syscall_record[index].start, NULL);
	return(index);
}
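
The 0xdeadbeef placeholder marks entries whose syscall has not returned yet;
a matching completion hook would overwrite it. A plausible counterpart
(hypothetical sketch, assuming the record also carries an end timestamp and
that index is the value returned above):

void record_syscall_end(int index, int result)
{
	/* Fill in the outcome for the entry allocated at syscall entry. */
	syscall_record[index].result = result;
	gettimeofday(&syscall_record[index].end, NULL);
}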
Example #11
/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item.  Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping.  If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
	xfs_dq_logitem_t	*qip)
{
	xfs_dquot_t		*dqp;
	uint			retval;

	dqp = qip->qli_dquot;
	if (dqp->q_pincount > 0)
		return (XFS_ITEM_PINNED);

	if (! xfs_qm_dqlock_nowait(dqp))
		return (XFS_ITEM_LOCKED);

	retval = XFS_ITEM_SUCCESS;
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * The dquot is already being flushed.	It may have been
		 * flushed delayed write, however, and we don't want to
		 * get stuck waiting for that to complete.  So, we want to check
		 * to see if we can lock the dquot's buffer without sleeping.
		 * If we can and it is marked for delayed write, then we
		 * hold it and send it out from the push routine.  We don't
		 * want to do that now since we might sleep in the device
		 * strategy routine.  We also don't want to grab the buffer lock
		 * here because we'd like not to call into the buffer cache
		 * while holding the AIL lock.
		 * Make sure to only return PUSHBUF if we set pushbuf_flag
		 * ourselves.  If someone else is doing it then we don't
		 * want to go to the push routine and duplicate their efforts.
		 */
		if (qip->qli_pushbuf_flag == 0) {
			qip->qli_pushbuf_flag = 1;
			ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
#ifdef DEBUG
			qip->qli_push_owner = current_pid();
#endif
			/*
			 * The dquot is left locked.
			 */
			retval = XFS_ITEM_PUSHBUF;
		} else {
			retval = XFS_ITEM_FLUSHING;
			xfs_dqunlock_nonotify(dqp);
		}
	}

	ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
	return (retval);
}
Example #12
struct lc_watchdog *lc_watchdog_add(int timeout,
                                    void (*callback)(pid_t, void *),
                                    void *data)
{
        struct lc_watchdog *lcw = NULL;
        ENTRY;

        LIBCFS_ALLOC(lcw, sizeof(*lcw));
        if (lcw == NULL) {
                CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
                RETURN(ERR_PTR(-ENOMEM));
        }

	spin_lock_init(&lcw->lcw_lock);
	lcw->lcw_refcount = 1; /* refcount for owner */
	lcw->lcw_task     = current;
	lcw->lcw_pid      = current_pid();
	lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
	lcw->lcw_data     = data;
	lcw->lcw_state    = LC_WATCHDOG_DISABLED;

	INIT_LIST_HEAD(&lcw->lcw_list);
	cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);

	mutex_lock(&lcw_refcount_mutex);
	if (++lcw_refcount == 1)
		lcw_dispatch_start();
	mutex_unlock(&lcw_refcount_mutex);

        /* Keep this working in case we enable them by default */
        if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
                lcw->lcw_last_touched = cfs_time_current();
                cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
                              cfs_time_current());
        }

        RETURN(lcw);
}
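
Usage is add / touch / delete: arm a watchdog around a long operation, touch
it to push the deadline forward, delete it when done. Roughly (a sketch;
lc_watchdog_touch() and lc_watchdog_delete() are the companion calls in this
API, shown here from memory):

static void slow_request(void *data)
{
	struct lc_watchdog *wd;

	wd = lc_watchdog_add(30, NULL, data);	/* NULL => lc_watchdog_dumplog */
	if (IS_ERR(wd))
		return;

	/* ... long-running work; touch to extend the deadline ... */
	lc_watchdog_touch(wd, 30);

	lc_watchdog_delete(wd);
}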
Example #13
/* Must be called under the lov_stripe_lock() */
int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
		   u64 size, int shrink)
{
	struct lov_oinfo *loi;
	int stripe = 0;
	__u64 kms;

	assert_spin_locked(&lsm->lsm_lock);
	LASSERT(lsm->lsm_lock_owner == current_pid());

	if (shrink) {
		for (; stripe < lsm->lsm_stripe_count; stripe++) {
			struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];

			kms = lov_size_to_stripe(lsm, size, stripe);
			CDEBUG(D_INODE,
			       "stripe %d KMS %sing %llu->%llu\n",
			       stripe, kms > loi->loi_kms ? "increase":"shrink",
			       loi->loi_kms, kms);
			loi->loi_lvb.lvb_size = kms;
			loi_kms_set(loi, loi->loi_lvb.lvb_size);
		}
		return 0;
	}

	if (size > 0)
		stripe = lov_stripe_number(lsm, size - 1);
	kms = lov_size_to_stripe(lsm, size, stripe);
	loi = lsm->lsm_oinfo[stripe];

	CDEBUG(D_INODE, "stripe %d KMS %sincreasing %llu->%llu\n",
	       stripe, kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms);
	if (kms > loi->loi_kms)
		loi_kms_set(loi, kms);

	return 0;
}
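
For intuition, lov_size_to_stripe() maps a file size to the corresponding
size on one stripe of the layout. For a plain RAID0 striping it can be
modelled like this (a toy model under simplifying assumptions, not the
Lustre implementation):

/* Toy model: per-stripe size for a file of `size` bytes striped over
 * stripe_count stripes of stripe_size bytes each. */
static u64 toy_size_to_stripe(u64 size, int stripe,
			      int stripe_count, u64 stripe_size)
{
	u64 swidth = stripe_size * stripe_count;
	u64 base = (size / swidth) * stripe_size;	/* full stripe rows */
	u64 r = size % swidth;				/* partial last row */
	u64 s = (u64)stripe * stripe_size;		/* this stripe's slot */

	if (r <= s)
		return base;
	if (r >= s + stripe_size)
		return base + stripe_size;
	return base + (r - s);
}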
Example #14
int SchedRR::tick(int cpu, const enum Motivo m) {
  int curr_pid = current_pid(cpu);
  int next_pid = curr_pid;  // Default: keep running the current task.

  if (m == TICK) {
    if (curr_pid == IDLE_TASK) {
      next_pid = this->proxIdDisponible(cpu);
    } else {
      cores[cpu].contador++;
      if (cores[cpu].contador == cores[cpu].quantum) {
        next_pid = finalizoQuantum(cpu);
        cores[cpu].contador = 0;
      }
    }
  }

  if (m == BLOCK) {
    int bloq = this->cores[cpu].pid;
    this->tareasBloqueadas.push_back(bloq);
    cores[cpu].contador = 0;
    next_pid = this->proxIdDisponible(cpu);
  }

  if (m == EXIT) {
    next_pid = this->proxIdDisponible(cpu);
    this->cores[cpu].contador = 0;
  }

  return next_pid;
}
Example #15
/* Must be called under the lov_stripe_lock() */
int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
		   obd_off size, int shrink)
{
	struct lov_oinfo *loi;
	int stripe = 0;
	__u64 kms;
	ENTRY;

	LASSERT(spin_is_locked(&lsm->lsm_lock));
	LASSERT(lsm->lsm_lock_owner == current_pid());

	if (shrink) {
		for (; stripe < lsm->lsm_stripe_count; stripe++) {
			struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];
			kms = lov_size_to_stripe(lsm, size, stripe);
			CDEBUG(D_INODE,
			       "stripe %d KMS %sing "LPU64"->"LPU64"\n",
			       stripe, kms > loi->loi_kms ? "increas":"shrink",
			       loi->loi_kms, kms);
			loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
		}
		RETURN(0);
	}

	if (size > 0)
		stripe = lov_stripe_number(lsm, size - 1);
	kms = lov_size_to_stripe(lsm, size, stripe);
	loi = lsm->lsm_oinfo[stripe];

	CDEBUG(D_INODE, "stripe %d KMS %sincreasing "LPU64"->"LPU64"\n",
	       stripe, kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms);
	if (kms > loi->loi_kms)
		loi_kms_set(loi, kms);

	RETURN(0);
}
Example #16
/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so whether it's
 * marked delayed write. If that's the case, we'll initiate a bawrite on that
 * buffer to expedite the process.
 *
 * We aren't holding the AIL lock (or the flush lock) when this gets called,
 * so it is inherently race-y.
 */
STATIC void
xfs_inode_item_pushbuf(
	xfs_inode_log_item_t	*iip)
{
	xfs_inode_t	*ip;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * The ili_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(iip->ili_pushbuf_flag != 0);
	ASSERT(iip->ili_push_owner == current_pid());

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (completion_done(&ip->i_flush) ||
	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		iip->ili_pushbuf_flag = 0;
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return;
	}

	mp = ip->i_mount;
	bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
		    iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);

	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			/*
			 * We were racing with iflush because we don't hold
			 * the AIL lock or the flush lock. However, at this point,
			 * we have the buffer, and we know that it's dirty.
			 * So, it's possible that iflush raced with us, and
			 * this item is already taken off the AIL.
			 * If not, we can flush it async.
			 */
			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
				  !completion_done(&ip->i_flush));
			iip->ili_pushbuf_flag = 0;
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			xfs_buftrace("INODE ITEM PUSH", bp);
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
		"xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p",
							error, iip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			iip->ili_pushbuf_flag = 0;
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			xfs_buf_relse(bp);
		}
		return;
	}
	/*
	 * We have to be careful about resetting pushbuf flag too early (above).
	 * Even though in theory we can do it as soon as we have the buflock,
	 * we don't want others to be doing work needlessly. They'll come to
	 * this function thinking that pushing the buffer is their
	 * responsibility only to find that the buffer is still locked by
	 * another task doing the same thing.
	 */
	iip->ili_pushbuf_flag = 0;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return;
}
Example #17
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	xfs_dq_logitem_t    *qip)
{
	xfs_dquot_t	*dqp;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	dqp = qip->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * The qli_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(qip->qli_pushbuf_flag != 0);
	ASSERT(qip->qli_push_owner == current_pid());

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * inode flush completed and the inode was taken off the AIL.
	 * So, just get out.
	 */
	if (!issemalocked(&(dqp->q_flock))  ||
	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		qip->qli_pushbuf_flag = 0;
		xfs_dqunlock(dqp);
		return;
	}
	mp = dqp->q_mount;
	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
		    XFS_QI_DQCHUNKLEN(mp),
		    XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
				  issemalocked(&(dqp->q_flock)));
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);

			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
#ifdef XFSRACEDEBUG
				delay_for_intr();
				delay(300);
#endif
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
	"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
							error, qip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);
			xfs_buf_relse(bp);
		}
		return;
	}

	qip->qli_pushbuf_flag = 0;
	xfs_dqunlock(dqp);
}
Example #18
/** Merge the lock value block(&lvb) attributes and KMS from each of the
 * stripes in a file into a single lvb. It is expected that the caller
 * initializes the current atime, mtime, ctime to avoid regressing a more
 * uptodate time on the local client.
 */
int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
		      struct ost_lvb *lvb, __u64 *kms_place)
{
	__u64 size = 0;
	__u64 kms = 0;
	__u64 blocks = 0;
	s64 current_mtime = lvb->lvb_mtime;
	s64 current_atime = lvb->lvb_atime;
	s64 current_ctime = lvb->lvb_ctime;
	int i;
	int rc = 0;

	assert_spin_locked(&lsm->lsm_lock);
	LASSERT(lsm->lsm_lock_owner == current_pid());

	CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
	       POSTID(&lsm->lsm_oi), lvb->lvb_size, lvb->lvb_mtime,
	       lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
	for (i = 0; i < lsm->lsm_stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		u64 lov_size, tmpsize;

		if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks)) {
			rc = OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
			continue;
		}

		tmpsize = loi->loi_kms;
		lov_size = lov_stripe_size(lsm, tmpsize, i);
		if (lov_size > kms)
			kms = lov_size;

		if (loi->loi_lvb.lvb_size > tmpsize)
			tmpsize = loi->loi_lvb.lvb_size;

		lov_size = lov_stripe_size(lsm, tmpsize, i);
		if (lov_size > size)
			size = lov_size;
		/* merge blocks, mtime, atime */
		blocks += loi->loi_lvb.lvb_blocks;
		if (loi->loi_lvb.lvb_mtime > current_mtime)
			current_mtime = loi->loi_lvb.lvb_mtime;
		if (loi->loi_lvb.lvb_atime > current_atime)
			current_atime = loi->loi_lvb.lvb_atime;
		if (loi->loi_lvb.lvb_ctime > current_ctime)
			current_ctime = loi->loi_lvb.lvb_ctime;

		CDEBUG(D_INODE, "MDT ID "DOSTID" on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
		       POSTID(&lsm->lsm_oi), loi->loi_ost_idx,
		       loi->loi_lvb.lvb_size, loi->loi_lvb.lvb_mtime,
		       loi->loi_lvb.lvb_atime, loi->loi_lvb.lvb_ctime,
		       loi->loi_lvb.lvb_blocks);
	}

	*kms_place = kms;
	lvb->lvb_size = size;
	lvb->lvb_blocks = blocks;
	lvb->lvb_mtime = current_mtime;
	lvb->lvb_atime = current_atime;
	lvb->lvb_ctime = current_ctime;
	return rc;
}
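
As the assertions require, callers take the stripe lock (which records them
as lsm_lock_owner) around the merge, and pre-set the times in the lvb so a
stale stripe cannot regress them. A call site is roughly (sketch;
lov_stripe_lock()/lov_stripe_unlock() are the companion helpers):

static int merge_attrs(struct lov_stripe_md *lsm, struct ost_lvb *lvb,
		       __u64 *kms)
{
	int rc;

	/* lvb times are expected to be initialized by the caller. */
	lov_stripe_lock(lsm);		/* sets lsm_lock_owner */
	rc = lov_merge_lvb_kms(lsm, lvb, kms);
	lov_stripe_unlock(lsm);
	return rc;
}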
Example #19
int lfsck_master_engine(void *args)
{
	struct lfsck_thread_args *lta      = args;
	struct lu_env		 *env	   = &lta->lta_env;
	struct lfsck_instance	 *lfsck    = lta->lta_lfsck;
	struct ptlrpc_thread	 *thread   = &lfsck->li_thread;
	struct dt_object	 *oit_obj  = lfsck->li_obj_oit;
	const struct dt_it_ops	 *oit_iops = &oit_obj->do_index_ops->dio_it;
	struct dt_it		 *oit_di;
	struct l_wait_info	  lwi	   = { 0 };
	int			  rc;
	ENTRY;

	oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit, BYPASS_CAPA);
	if (IS_ERR(oit_di)) {
		rc = PTR_ERR(oit_di);
		CERROR("%s: LFSCK, fail to init iteration: rc = %d\n",
		       lfsck_lfsck2name(lfsck), rc);

		GOTO(fini_args, rc);
	}

	spin_lock(&lfsck->li_lock);
	lfsck->li_di_oit = oit_di;
	spin_unlock(&lfsck->li_lock);
	rc = lfsck_prep(env, lfsck, lta->lta_lsp);
	if (rc != 0)
		GOTO(fini_oit, rc);

	CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
	       "oit_cookie = "LPU64", dir_cookie = "LPU64", parent = "DFID
	       ", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
	       lfsck->li_pos_current.lp_oit_cookie,
	       lfsck->li_pos_current.lp_dir_cookie,
	       PFID(&lfsck->li_pos_current.lp_dir_parent),
	       current_pid());

	spin_lock(&lfsck->li_lock);
	thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&lfsck->li_lock);
	wake_up_all(&thread->t_ctl_waitq);

	l_wait_event(thread->t_ctl_waitq,
		     lfsck->li_start_unplug ||
		     !thread_is_running(thread),
		     &lwi);
	if (!thread_is_running(thread))
		GOTO(fini_oit, rc = 0);

	if (!cfs_list_empty(&lfsck->li_list_scan) ||
	    cfs_list_empty(&lfsck->li_list_double_scan))
		rc = lfsck_master_oit_engine(env, lfsck);
	else
		rc = 1;

	CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
	       "oit_cookie = "LPU64", dir_cookie = "LPU64", parent = "DFID
	       ", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
	       lfsck->li_pos_current.lp_oit_cookie,
	       lfsck->li_pos_current.lp_dir_cookie,
	       PFID(&lfsck->li_pos_current.lp_dir_parent),
	       current_pid(), rc);

	if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
		rc = lfsck_post(env, lfsck, rc);

	if (lfsck->li_di_dir != NULL)
		lfsck_close_dir(env, lfsck);

fini_oit:
	lfsck_di_oit_put(env, lfsck);
	oit_iops->fini(env, oit_di);
	if (rc == 1) {
		if (!cfs_list_empty(&lfsck->li_list_double_scan))
			rc = lfsck_double_scan(env, lfsck);
		else
			rc = 0;
	} else {
		lfsck_quit(env, lfsck);
	}

	/* XXX: Purge the pinned objects in the future. */

fini_args:
	spin_lock(&lfsck->li_lock);
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&lfsck->li_lock);
	wake_up_all(&thread->t_ctl_waitq);
	lfsck_thread_args_fini(lta);
	return rc;
}
Example #20
int begin_wait_thread( int pid, int tid, int *rc )
{
	int success = -1;
	struct wait_info *nw = NULL;
	struct process *proc;
	struct thread *tr;

	ASSERT( pid == current_pid() );

	proc = checkout_process( pid, WRITER );
	ASSERT( proc != NULL );

	tr = find_thread_with_id( proc, tid );
	if ( tr == NULL )
	{
		commit_process( proc );
		return -1;
	}

	nw = (struct wait_info*)malloc( sizeof(struct wait_info) );
	if ( nw == NULL )
	{
		commit_process( proc );
		return -1;
	}

	nw->next	= NULL;
	nw->prev	= NULL;
	nw->pid		= current_pid();
	nw->tid		= current_tid();
	nw->success	= -1;	// Assume failure from the very beginning.
	nw->rc		= -1;

	current_thread()->active_wait = nw;	// Set our active wait information.

	// Insert it into the target thread's wait list.
	if ( tr->waits != NULL ) tr->waits->prev = nw;
	nw->next = tr->waits;
	tr->waits = nw;

	commit_process( proc );

	// ------ Now we go to sleep ------
	proc = checkout_process( current_pid(), WRITER );
	if ( proc == NULL )
	{
		/// \todo freak out and handle stuff properly
		return -1;
	}

	disable_interrupts();
	atomic_dec( &(proc->kernel_threads) );
	set_thread_state( current_thread(), THREAD_WAITING );
	commit_process( proc );
	enable_interrupts();

	sched_yield();		// Release!

	atomic_inc( &(proc->kernel_threads) );	// Secure ourselves.

	// Get our process back.
	proc = checkout_process( current_pid(), WRITER );
	if ( proc == NULL ) return -1;

	current_thread()->active_wait = NULL;

	commit_process( proc );

	// We're back. Return the correct info.
	*rc = nw->rc;
	success = nw->success;

	// nw should have been unlinked by the scheduler, and the waiter's
	// active_wait cleared by the scheduler as well; we just delete it.
	free( nw );

	return success;
}
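
The comments above lean on the scheduler side of the protocol: whoever reaps
the waited-on thread fills in rc/success, unlinks the wait_info, clears the
waiter's active_wait, and re-readies the waiter. In outline (a hypothetical
sketch of that other half, not code from this listing):

static void complete_wait(struct thread *waiter, struct wait_info *w, int rc)
{
	w->rc = rc;
	w->success = 0;			// the wait completed

	// Unlink w from the target's wait list (updating the list head
	// when w is the first entry is elided in this sketch).
	if ( w->prev != NULL ) w->prev->next = w->next;
	if ( w->next != NULL ) w->next->prev = w->prev;

	waiter->active_wait = NULL;	// the waiter frees w after waking
	set_thread_state( waiter, THREAD_RUNNING );
}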
Example #21
static
int ll_getxattr_common(struct inode *inode, const char *name,
		       void *buffer, size_t size, __u64 valid)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	struct mdt_body *body;
	int xattr_type, rc;
	void *xdata;
	struct obd_capa *oc;
	struct rmtacl_ctl_entry *rce = NULL;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
	       inode->i_ino, inode->i_generation, inode);

	/* listxattr has slightly different behavior from that of ext3:
	 * without 'user_xattr' ext3 will list all xattr names but
	 * filter out "^user..*"; we list them all for simplicity.
	 */
	if (!name) {
		xattr_type = XATTR_OTHER_T;
		goto do_getxattr;
	}

	xattr_type = get_xattr_type(name);
	rc = xattr_type_filter(sbi, xattr_type);
	if (rc)
		return rc;

	/* b15587: ignore security.capability xattr for now */
	if ((xattr_type == XATTR_SECURITY_T &&
	    strcmp(name, "security.capability") == 0))
		return -ENODATA;

	/* LU-549:  Disable security.selinux when selinux is disabled */
	if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
	    strcmp(name, "security.selinux") == 0)
		return -EOPNOTSUPP;

#ifdef CONFIG_FS_POSIX_ACL
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
	    (xattr_type == XATTR_ACL_ACCESS_T ||
	    xattr_type == XATTR_ACL_DEFAULT_T)) {
		rce = rct_search(&sbi->ll_rct, current_pid());
		if (rce == NULL ||
		    (rce->rce_ops != RMT_LSETFACL &&
		    rce->rce_ops != RMT_LGETFACL &&
		    rce->rce_ops != RMT_RSETFACL &&
		    rce->rce_ops != RMT_RGETFACL))
			return -EOPNOTSUPP;
	}

	/* posix acl is under protection of LOOKUP lock. by the time we get
	 * here we have just done path resolution to the target inode, so
	 * there is a good chance the cached ACL is up to date.
	 */
	if (xattr_type == XATTR_ACL_ACCESS_T &&
	    !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
		struct ll_inode_info *lli = ll_i2info(inode);
		struct posix_acl *acl;

		spin_lock(&lli->lli_lock);
		acl = posix_acl_dup(lli->lli_posix_acl);
		spin_unlock(&lli->lli_lock);

		if (!acl)
			return -ENODATA;

		rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
		posix_acl_release(acl);
		return rc;
	}
	if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
		return -ENODATA;
#endif

do_getxattr:
	if (sbi->ll_xattr_cache_enabled && (rce == NULL ||
					    rce->rce_ops == RMT_LGETFACL ||
					    rce->rce_ops == RMT_LSETFACL)) {
		rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
		if (rc < 0)
			GOTO(out_xattr, rc);
	} else {
		oc = ll_mdscapa_get(inode);
		rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
				valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
				name, NULL, 0, size, 0, &req);
		capa_put(oc);

		if (rc < 0)
			GOTO(out_xattr, rc);

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		LASSERT(body);

		/* only detect the xattr size */
		if (size == 0)
			GOTO(out, rc = body->eadatasize);

		if (size < body->eadatasize) {
			CERROR("server bug: replied size %u > %u\n",
				body->eadatasize, (int)size);
			GOTO(out, rc = -ERANGE);
		}

		if (body->eadatasize == 0)
			GOTO(out, rc = -ENODATA);

		/* do not need swab xattr data */
		xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
							body->eadatasize);
		if (!xdata)
			GOTO(out, rc = -EFAULT);

		memcpy(buffer, xdata, body->eadatasize);
		rc = body->eadatasize;
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (rce && rce->rce_ops == RMT_LSETFACL) {
		ext_acl_xattr_header *acl;

		acl = lustre_posix_acl_xattr_2ext(
					(posix_acl_xattr_header *)buffer, rc);
		if (IS_ERR(acl))
			GOTO(out, rc = PTR_ERR(acl));

		rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
			    xattr_type, acl);
		if (unlikely(rc < 0)) {
			lustre_ext_acl_xattr_free(acl);
			GOTO(out, rc);
		}
	}
#endif

out_xattr:
	if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
		LCONSOLE_INFO(
			"%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
			ll_get_fsname(inode->i_sb, NULL, 0), rc);
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}
out:
	ptlrpc_req_finished(req);
	return rc;
}
Example #22
static
int ll_setxattr_common(struct inode *inode, const char *name,
		       const void *value, size_t size,
		       int flags, __u64 valid)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	int xattr_type, rc;
	struct obd_capa *oc;
	struct rmtacl_ctl_entry *rce = NULL;
#ifdef CONFIG_FS_POSIX_ACL
	posix_acl_xattr_header *new_value = NULL;
	ext_acl_xattr_header *acl = NULL;
#endif
	const char *pv = value;

	xattr_type = get_xattr_type(name);
	rc = xattr_type_filter(sbi, xattr_type);
	if (rc)
		return rc;

	/* b10667: ignore lustre special xattr for now */
	if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) ||
	    (xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0))
		return 0;

	/* b15587: ignore security.capability xattr for now */
	if ((xattr_type == XATTR_SECURITY_T &&
	    strcmp(name, "security.capability") == 0))
		return 0;

	/* LU-549:  Disable security.selinux when selinux is disabled */
	if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
	    strcmp(name, "security.selinux") == 0)
		return -EOPNOTSUPP;

#ifdef CONFIG_FS_POSIX_ACL
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
	    (xattr_type == XATTR_ACL_ACCESS_T ||
	    xattr_type == XATTR_ACL_DEFAULT_T)) {
		rce = rct_search(&sbi->ll_rct, current_pid());
		if (rce == NULL ||
		    (rce->rce_ops != RMT_LSETFACL &&
		    rce->rce_ops != RMT_RSETFACL))
			return -EOPNOTSUPP;

		if (rce->rce_ops == RMT_LSETFACL) {
			struct eacl_entry *ee;

			ee = et_search_del(&sbi->ll_et, current_pid(),
					   ll_inode2fid(inode), xattr_type);
			LASSERT(ee != NULL);
			if (valid & OBD_MD_FLXATTR) {
				acl = lustre_acl_xattr_merge2ext(
						(posix_acl_xattr_header *)value,
						size, ee->ee_acl);
				if (IS_ERR(acl)) {
					ee_free(ee);
					return PTR_ERR(acl);
				}
				size =  CFS_ACL_XATTR_SIZE(\
						le32_to_cpu(acl->a_count), \
						ext_acl_xattr);
				pv = (const char *)acl;
			}
			ee_free(ee);
		} else if (rce->rce_ops == RMT_RSETFACL) {
			size = lustre_posix_acl_xattr_filter(
						(posix_acl_xattr_header *)value,
						size, &new_value);
			if (unlikely(size < 0))
				return size;

			pv = (const char *)new_value;
		} else
			return -EOPNOTSUPP;

		valid |= rce_ops2valid(rce->rce_ops);
	}
#endif
	if (sbi->ll_xattr_cache_enabled &&
	    (rce == NULL || rce->rce_ops == RMT_LSETFACL)) {
		rc = ll_xattr_cache_update(inode, name, pv, size, valid, flags);
	} else {
		oc = ll_mdscapa_get(inode);
		rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
				valid, name, pv, size, 0, flags,
				ll_i2suppgid(inode), &req);
		capa_put(oc);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (new_value != NULL)
		lustre_posix_acl_xattr_free(new_value, size);
	if (acl != NULL)
		lustre_ext_acl_xattr_free(acl);
#endif
	if (rc) {
		if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
			LCONSOLE_INFO("Disabling user_xattr feature because "
				      "it is not supported on the server\n");
			sbi->ll_flags &= ~LL_SBI_USER_XATTR;
		}
		return rc;
	}

	ptlrpc_req_finished(req);
	return 0;
}
Example #23
int linux_get_tasks(struct target *target, int context)
{
    int loop = 0;
    int retval = 0;
    struct linux_os *linux_os = (struct linux_os *)
                                target->rtos->rtos_specific_params;
    linux_os->thread_list = NULL;
    linux_os->thread_count = 0;

    if (linux_os->init_task_addr == 0xdeadbeef) {
        LOG_INFO("no init symbol\n");
        return ERROR_FAIL;
    }

    int64_t start = timeval_ms();

    struct threads *t = calloc(1, sizeof(struct threads));
    struct threads *last = NULL;
    t->base_addr = linux_os->init_task_addr;
    /* retrieve the thread ids currently running on the different SMP cores */
    get_current(target, 1);

    while (((t->base_addr != linux_os->init_task_addr) &&
            (t->base_addr != 0)) || (loop == 0)) {
        loop++;
        fill_task(target, t);
        retval = get_name(target, t);

        if (loop > MAX_THREADS) {
            free(t);
            LOG_INFO("more than %d threads !!", MAX_THREADS);
            return ERROR_FAIL;
        }

        if (retval != ERROR_OK) {
            free(t);
            return ERROR_FAIL;
        }

        /*  check that this thread is not one of the current threads already
         *  created */
#ifdef PID_CHECK

        if (!current_pid(linux_os, t->pid)) {
#else
        if (!current_base_addr(linux_os, t->base_addr)) {
#endif
            t->threadid = linux_os->threadid_count;
            t->status = 1;
            linux_os->threadid_count++;

            linux_os->thread_list =
                liste_add_task(linux_os->thread_list, t, &last);
            /* no need to fill in the context if it is a current thread. */
            linux_os->thread_count++;
            t->thread_info_addr = 0xdeadbeef;

            if (context)
                t->context =
                    cpu_context_read(target, t->base_addr,
                                     &t->thread_info_addr);
        } else {
            /*LOG_INFO("thread %s is a current thread already created",t->name); */
            free(t);
        }

        uint32_t base_addr = next_task(target, t);
        t = calloc(1, sizeof(struct threads));
        t->base_addr = base_addr;
    }

    linux_os->threads_lookup = 1;
    linux_os->threads_needs_update = 0;
    linux_os->preupdtate_threadid_count = linux_os->threadid_count - 1;
    /*  check that all current threads have been identified  */

    LOG_INFO("complete time %" PRId64 ", thread mean %" PRId64 "\n",
             (timeval_ms() - start),
             (timeval_ms() - start) / linux_os->threadid_count);

    LOG_INFO("threadid count %d", linux_os->threadid_count);
    free(t);

    return ERROR_OK;
}
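
Note that current_pid() here is not the kernel accessor of the earlier
examples but an OpenOCD-side predicate (compiled under PID_CHECK) answering
whether a pid belongs to one of the per-core current threads. Its shape,
inferred from the call site, is roughly (an assumption, not from this
listing):

static int current_pid(struct linux_os *linux_os, uint32_t pid)
{
    struct current_thread *ct = linux_os->current_threads;

    while (ct != NULL) {
        if (ct->pid == pid)
            return 1;
        ct = ct->next;
    }
    return 0;
}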

static int clean_threadlist(struct target *target)
{
    struct linux_os *linux_os = (struct linux_os *)
                                target->rtos->rtos_specific_params;
    struct threads *old, *temp = linux_os->thread_list;

    while (temp != NULL) {
        old = temp;

        if (temp->context)
            free(temp->context);

        temp = temp->next;
        free(old);
    }

    return ERROR_OK;
}

static int linux_os_clean(struct target *target)
{
    struct linux_os *os_linux = (struct linux_os *)
                                target->rtos->rtos_specific_params;
    clean_threadlist(target);
    os_linux->init_task_addr = 0xdeadbeef;
    os_linux->name = "linux";
    os_linux->thread_list = NULL;
    os_linux->thread_count = 0;
    os_linux->nr_cpus = 0;
    os_linux->threads_lookup = 0;
    os_linux->threads_needs_update = 0;
    os_linux->threadid_count = 1;
    return ERROR_OK;
}

static int insert_into_threadlist(struct target *target, struct threads *t)
{
    struct linux_os *linux_os = (struct linux_os *)
                                target->rtos->rtos_specific_params;
    struct threads *temp = linux_os->thread_list;
    t->threadid = linux_os->threadid_count;
    linux_os->threadid_count++;
    t->status = 1;
    t->next = NULL;

    if (temp == NULL)
        linux_os->thread_list = t;
    else {
        while (temp->next != NULL)
            temp = temp->next;

        t->next = NULL;
        temp->next = t;
    }

    return ERROR_OK;
}

static void linux_identify_current_threads(struct target *target)
{
    struct linux_os *linux_os = (struct linux_os *)
                                target->rtos->rtos_specific_params;
    struct threads *thread_list = linux_os->thread_list;
    struct current_thread *ct = linux_os->current_threads;
    struct threads *t = NULL;

    while ((ct != NULL)) {
        if (ct->threadid == -1) {

            /*  un-identified thread */
            int found = 0;
            t = calloc(1, sizeof(struct threads));
            t->base_addr = ct->TS;
#ifdef PID_CHECK

            if (fill_task_pid(target, t) != ERROR_OK) {
error_handling:
                free(t);
                LOG_ERROR
                ("linux identify_current_threads: unable to read pid");
                return;
            }
#endif

            /* search in the list of threads if pid
               already present */
            while ((thread_list != NULL) && (found == 0)) {
#ifdef PID_CHECK
                if (thread_list->pid == t->pid) {
#else
                if (thread_list->base_addr == t->base_addr) {
#endif
                    free(t);
                    t = thread_list;
                    found = 1;
                }
                thread_list = thread_list->next;
            }

            if (!found) {
                /*  it is a new thread */
                if (fill_task(target, t) != ERROR_OK)
                    goto error_handling;

                get_name(target, t);
                insert_into_threadlist(target, t);
                t->thread_info_addr = 0xdeadbeef;
            }

            t->status = 3;
            ct->threadid = t->threadid;
#ifdef PID_CHECK
            ct->pid = t->pid;
#endif
            linux_os->thread_count++;
#if 0
            if (found == 0)
                LOG_INFO("current thread core %x identified %s",
                         ct->core_id, t->name);
            else
                LOG_INFO("current thread core %x, reused %s",
                         ct->core_id, t->name);
#endif
        }
#if 0
        else {
            struct threads tmp;
            tmp.base_addr = ct->TS;
            get_name(target, &tmp);
            LOG_INFO("current thread core %x , already identified %s !!!",
                     ct->core_id, tmp.name);
        }
#endif
        ct = ct->next;
    }

    return;
#ifndef PID_CHECK
error_handling:
    free(t);
    LOG_ERROR("unable to read pid");
    return;

#endif
}
Example #24
/* Code goes along with:
**    entry.s:        ENTRY_NAME(sys_cpus)   / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
	int i,j=0;
	extern int current_pid(int cpu);

	if( argc > 2 ) {
		printk("sys_cpus:Only one argument supported\n");
		return (-1);
	}
	if ( argc == 1 ){
	
#ifdef DUMP_MORE_STATE
		for(i=0; i<NR_CPUS; i++) {
			int cpus_per_line = 4;
			if(cpu_online(i)) {
				if (j++ % cpus_per_line)
					printk(" %3d",i);
				else
					printk("\n %3d",i);
			}
		}
		printk("\n"); 
#else
	    	printk("\n 0\n"); 
#endif
	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
		printk("\nCPUSTATE  TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
		for(i=0;i<NR_CPUS;i++) {
			if (!cpu_online(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
					case STATE_RENDEZVOUS:
						printk("RENDEZVS ");
						break;
					case STATE_RUNNING:
						printk((current_pid(i)!=0) ? "RUNNING  " : "IDLING   ");
						break;
					case STATE_STOPPED:
						printk("STOPPED  ");
						break;
					case STATE_HALTED:
						printk("HALTED   ");
						break;
					default:
						printk("%08x?", cpu_data[i].state);
						break;
				}
				if(cpu_online(i)) {
					printk(" %4d",current_pid(i));
				}	
				printk(" %6d",cpu_number_map(i));
				printk(" %5d",i);
				printk(" 0x%lx\n",cpu_data[i].hpa);
			}	
		}
#else
		printk("\n%s  %4d      0     0 --------",
			(current->pid)?"RUNNING ": "IDLING  ",current->pid); 
#endif
	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) { 
#ifdef DUMP_MORE_STATE
     		printk("\nCPUSTATE   CPUID\n");
		for (i=0;i<NR_CPUS;i++) {
			if (!cpu_online(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
					case STATE_RENDEZVOUS:
						printk("RENDEZVS");break;
					case STATE_RUNNING:
						printk((current_pid(i)!=0) ? "RUNNING " : "IDLING");
						break;
					case STATE_STOPPED:
						printk("STOPPED ");break;
					case STATE_HALTED:
						printk("HALTED  ");break;
					default:
						break;
				}
				printk("  %5d\n",i);
			}	
		}
#else
		printk("\n%s    CPU0",(current->pid==0)?"RUNNING ":"IDLING  "); 
#endif
	} else {
Example #25
int begin_wait_process( int pid, int *rc )
{
	int success = -1;
	struct wait_info *nw = NULL;
	struct process *proc;

	proc = checkout_process( pid, WRITER );
	if ( proc == NULL ) return -1;

	nw = (struct wait_info*)malloc( sizeof(struct wait_info) );
	if ( nw == NULL )
	{
		commit_process( proc );
		return -1;
	}

	nw->next	= NULL;
	nw->prev	= NULL;
	nw->pid		= current_pid();
	nw->tid		= current_tid();
	nw->success	= -1;	// Assume failure from the very beginning.
	nw->rc		= -1;

	// Insert it into the process's wait list.
	if ( proc->waits != NULL ) proc->waits->prev = nw;
	nw->next = proc->waits;
	proc->waits = nw;

	commit_process( proc );

	// ------ Now we go to sleep ------
	proc = checkout_process( current_pid(), WRITER );
	ASSERT( proc != NULL );

	current_thread()->active_wait = nw;	// Save our active wait.

	disable_interrupts();
	atomic_dec( &(proc->kernel_threads) );
	set_thread_state( current_thread(), THREAD_WAITING );
	commit_process( proc );
	enable_interrupts();

	sched_yield();

	atomic_inc( &(proc->kernel_threads) );	// Secure this thread.

	// Get our process back.
	proc = checkout_process( current_pid(), WRITER );
	ASSERT( proc != NULL );

	current_thread()->active_wait = NULL;

	commit_process( proc );

	// We're back. Return the correct info.
	*rc = nw->rc;
	success = nw->success;

	// nw should have been unlinked by the scheduler, and the waiter's
	// active_wait cleared by the scheduler as well; we just delete it.
	free( nw );

	/// \todo active_waits for threads.

	return success;
}