Example #1
File: test.c Project: Walkerks/locks
static int test_mutex_timed_thread_func(void* arg)
{
  int ret;
  struct timespec ts;
  struct TestMutexTimedData* data = (struct TestMutexTimedData*) arg;

  ret = mtx_timedlock(&(data->mutex), &(data->timeout));
  assert (ret == thrd_timedout);

  timespec_get(&ts, TIME_UTC);
  ret = timespec_compare(&ts, &(data->start));
  assert (ret >= 0);
  ret = timespec_compare(&ts, &(data->timeout));
  assert (ret >= 0);
  ret = timespec_compare(&ts, &(data->end));
  assert (ret < 0);

  ret = mtx_lock(&(data->mutex));
  assert (ret == thrd_success);

  timespec_get(&ts, TIME_UTC);
  ret = timespec_compare(&ts, &(data->end));
  assert (ret >= 0);

  ret = timespec_compare(&ts, &(data->upper));
  assert (ret < 0);

  mtx_unlock(&(data->mutex));

  return 0;
}
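
Every excerpt in this collection relies on the same three-way convention for timespec_compare: a negative result means the first timestamp is earlier than the second, zero means they are equal, and a positive result means it is later. The exact helper differs between the C11 test harness above, the Linux kernel, and WinPR, so the following is only a minimal sketch of that convention (hypothetical name, assuming <time.h>), not any project's actual implementation:

#include <time.h>

/* Illustrative only: <0 if lhs is earlier than rhs, 0 if equal, >0 if later. */
static inline int timespec_compare_sketch(const struct timespec *lhs,
                                          const struct timespec *rhs)
{
	/* Compare whole seconds first, then the nanosecond remainder. */
	if (lhs->tv_sec != rhs->tv_sec)
		return lhs->tv_sec < rhs->tv_sec ? -1 : 1;
	if (lhs->tv_nsec != rhs->tv_nsec)
		return lhs->tv_nsec < rhs->tv_nsec ? -1 : 1;
	return 0;
}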
Example #2
File: recovery.c Project: 020gzh/linux
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}
Example #3
/* copy a/m/ctime from the lower branch with the newest times */
void unionfs_copy_attr_times(struct inode *upper)
{
	int bindex;
	struct inode *lower;

	if (!upper)
		return;
	if (ibstart(upper) < 0) {
#ifdef CONFIG_UNION_FS_DEBUG
		WARN_ON(ibstart(upper) < 0);
#endif /* CONFIG_UNION_FS_DEBUG */
		return;
	}
	for (bindex = ibstart(upper); bindex <= ibend(upper); bindex++) {
		lower = unionfs_lower_inode_idx(upper, bindex);
		if (!lower)
			continue; /* not all lower dir objects may exist */
		if (unlikely(timespec_compare(&upper->i_mtime,
					      &lower->i_mtime) < 0))
			upper->i_mtime = lower->i_mtime;
		if (unlikely(timespec_compare(&upper->i_ctime,
					      &lower->i_ctime) < 0))
			upper->i_ctime = lower->i_ctime;
		if (unlikely(timespec_compare(&upper->i_atime,
					      &lower->i_atime) < 0))
			upper->i_atime = lower->i_atime;
	}
}
Example #4
File: timer.c Project: achrafweb/FreeRDP
void InsertTimerQueueTimer(WINPR_TIMER_QUEUE_TIMER** pHead, WINPR_TIMER_QUEUE_TIMER* timer)
{
	WINPR_TIMER_QUEUE_TIMER* node;

	if (!(*pHead))
	{
		*pHead = timer;
		timer->next = NULL;
		return;
	}

	node = *pHead;

	while (node->next)
	{
		if (timespec_compare(&(timer->ExpirationTime), &(node->ExpirationTime)) > 0)
		{
			if (timespec_compare(&(timer->ExpirationTime), &(node->next->ExpirationTime)) < 0)
				break;
		}

		node = node->next;
	}

	if (node->next)
	{
		timer->next = node->next->next;
		node->next = timer;
	}
	else
	{
		node->next = timer;
		timer->next = NULL;
	}
}
Example #5
File: stats.c Project: Xyratex/LoadSim
void stat_time_update(struct op_time_stat *st, struct timespec *val)
{
	DPRINT("update stat %d:%ld\n", (int)val->tv_sec, val->tv_nsec);

	if (timespec_compare(&st->ot_min, val) > 0) {
		st->ot_min = *val;
	}
	if (timespec_compare(&st->ot_max, val) < 0) {
		st->ot_max = *val;
	}
	st->ot_count ++;
	st->ot_sum.tv_sec += val->tv_sec;
	st->ot_sum.tv_nsec += val->tv_nsec;
}
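
One caveat about the accumulation above: ot_sum.tv_sec and ot_sum.tv_nsec are added independently, so the nanosecond field can grow past one second without ever being carried into the seconds field. A hedged sketch of the normalization a caller would typically want, reusing set_normalized_timespec (the kernel helper that also appears in example #30 below); this is an illustration of the missing carry step, not part of the original project:

	st->ot_sum.tv_sec += val->tv_sec;
	st->ot_sum.tv_nsec += val->tv_nsec;
	/* Carry any accumulated nanoseconds >= NSEC_PER_SEC into tv_sec. */
	set_normalized_timespec(&st->ot_sum, st->ot_sum.tv_sec, st->ot_sum.tv_nsec);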
Example #6
File: hello.c Project: chowp/mac80211
/*
Insert a packet to the carrier sense or hidden terminal list
*/
void update_list( unsigned char mac1[6], unsigned char mac2[6],struct timespec  value,int t){
	struct timespec tmp1 = {0};
	if(timespec_compare(&value,&tmp1)<0){
		printk(KERN_DEBUG "[warning]There exists negtive dmac!\n");
	}
	int i;
	struct inf_info * tmp;
	for(i=0;i<CS_NUMBER;i++){
                tmp = (struct inf_info *)&cs[t][i];
		if( (tmp->value.tv_nsec != 0) && 
                    (matched(ether_sprintf_test3(tmp->wlan_src),ether_sprintf_test4(tmp->wlan_dst),ether_sprintf_test(mac1),ether_sprintf_test2(mac2),t) == true) ){
                       tmp->value = timespec_add(tmp->value,value);
			return;
                }
        }
        // there is no match!!
        for(i=0;i<CS_NUMBER;i++)
        {
                tmp = (struct inf_info *)&cs[t][i];
                if( (cs[t][i].value.tv_nsec == 0 ) && (control_address(ether_sprintf_test(mac1))== 0)&& (control_address(ether_sprintf_test2(mac2))== 0))
                {
                        memcpy(cs[t][i].wlan_src,mac1,MAC_LEN);
                        memcpy(cs[t][i].wlan_dst,mac2,MAC_LEN);
                        cs[t][i].value = value;
                        return; 
                }
        }
}
Example #7
int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value)
{
	u32 status = 0;
	struct timespec temptime;
	const struct timespec timedelay = {
		0,
		min_t(u32, abs(dw9714_dev.number_of_steps)*DELAY_PER_STEP_NS,
			DELAY_MAX_PER_STEP_NS),
	};

	ktime_get_ts(&temptime);

	temptime = timespec_sub(temptime, (dw9714_dev.timestamp_t_focus_abs));

	if (timespec_compare(&temptime, &timedelay) <= 0) {
		status |= ATOMISP_FOCUS_STATUS_MOVING;
		status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
	} else {
		status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
		status |= ATOMISP_FOCUS_HP_COMPLETE;
	}
	*value = status;

	return 0;
}
Example #8
static struct dirent *dirsort_readdir(vfs_handle_struct *handle,
					  DIR *dirp,
					  SMB_STRUCT_STAT *sbuf)
{
	struct dirsort_privates *data = NULL;
	struct timespec current_mtime;

	SMB_VFS_HANDLE_GET_DATA(handle, data, struct dirsort_privates,
				return NULL);

	if (get_sorted_dir_mtime(handle, data, &current_mtime) == false) {
		return NULL;
	}

	/* throw away cache and re-read the directory if we've changed */
	if (timespec_compare(&current_mtime, &data->mtime) > 0) {
		open_and_sort_dir(handle, data);
	}

	if (data->pos >= data->number_of_entries) {
		return NULL;
	}

	return &data->directory_list[data->pos++];
}
Example #9
File: timer.c Project: achrafweb/FreeRDP
static void* TimerQueueThread(void* arg)
{
	int status;
	struct timespec timeout;
	WINPR_TIMER_QUEUE* timerQueue = (WINPR_TIMER_QUEUE*) arg;

	while (1)
	{
		pthread_mutex_lock(&(timerQueue->cond_mutex));
		timespec_gettimeofday(&timeout);

		if (!timerQueue->activeHead)
		{
			timespec_add_ms(&timeout, 50);
		}
		else
		{
			if (timespec_compare(&timeout, &(timerQueue->activeHead->ExpirationTime)) < 0)
			{
				timespec_copy(&timeout, &(timerQueue->activeHead->ExpirationTime));
			}
		}

		status = pthread_cond_timedwait(&(timerQueue->cond), &(timerQueue->cond_mutex), &timeout);
		FireExpiredTimerQueueTimers(timerQueue);
		pthread_mutex_unlock(&(timerQueue->cond_mutex));

		if (timerQueue->bCancelled)
			break;
	}

	return NULL;
}
Example #10
File: time.c Project: 0-T-0/ps4-linux
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
Example #11
File: inode.c Project: Addision/LVS
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	};

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	ip->i_disksize = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, ip->i_disksize);
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	ip->i_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	if (gfs2_consist_inode(ip))
		gfs2_dinode_print(ip);
	return -EIO;
}
Example #12
File: hello.c Project: chowp/mac80211
void backup_sniffer_packet(struct timespec tw, struct timespec te, int ampdu_type,int t){
        struct timespec tr;
	int j = 0;
        //first round
        int jump = 0;
	for (j =current_index[t];; j=(j-1+HOLD_TIME)%HOLD_TIME){
		jump = jump + 1;
		if (jump == HOLD_TIME){
			break; 
		}
		clear_timespec(&tr);
                tr = store[t][j].te;
                if ((timespec_compare(&tr,&tw)>0) && (timespec_compare(&tr ,&te)<0)){
			backup_store[t][j] = store[t][j];
		}
	}
}
Example #13
static void au_do_dir_ts(void *arg)
{
	struct au_dir_ts_arg *a = arg;
	struct au_dtime dt;
	struct path h_path;
	struct inode *dir, *h_dir;
	struct super_block *sb;
	struct au_branch *br;
	struct au_hinode *hdir;
	int err;
	aufs_bindex_t btop, bindex;

	sb = a->dentry->d_sb;
	if (d_really_is_negative(a->dentry))
		goto out;
	/* no dir->i_mutex lock */
	aufs_read_lock(a->dentry, AuLock_DW); /* noflush */

	dir = d_inode(a->dentry);
	btop = au_ibtop(dir);
	bindex = au_br_index(sb, a->brid);
	if (bindex < btop)
		goto out_unlock;

	br = au_sbr(sb, bindex);
	h_path.dentry = au_h_dptr(a->dentry, bindex);
	if (!h_path.dentry)
		goto out_unlock;
	h_path.mnt = au_br_mnt(br);
	au_dtime_store(&dt, a->dentry, &h_path);

	br = au_sbr(sb, btop);
	if (!au_br_writable(br->br_perm))
		goto out_unlock;
	h_path.dentry = au_h_dptr(a->dentry, btop);
	h_path.mnt = au_br_mnt(br);
	err = vfsub_mnt_want_write(h_path.mnt);
	if (err)
		goto out_unlock;
	hdir = au_hi(dir, btop);
	au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
	h_dir = au_h_iptr(dir, btop);
	if (h_dir->i_nlink
	    && timespec_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) {
		dt.dt_h_path = h_path;
		au_dtime_revert(&dt);
	}
	au_hn_inode_unlock(hdir);
	vfsub_mnt_drop_write(h_path.mnt);
	au_cpup_attr_timesizes(dir);

out_unlock:
	aufs_read_unlock(a->dentry, AuLock_DW);
out:
	dput(a->dentry);
	au_nwt_done(&au_sbi(sb)->si_nowait);
	kfree(arg);
}
Example #14
void addTimeAndBusyWait(struct timespec* time, long nanoseconds)
{
    addTime(time, nanoseconds);
    struct timespec actualTime;
    do
    {
        get_monotonic_boottime(&actualTime);
    } while (timespec_compare(time, &actualTime) > 0);
}
Example #15
File: file.c Project: Mr-Aloof/wl500g
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = CURRENT_TIME;
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}
Example #16
static void dirsort_seekdir(vfs_handle_struct *handle, DIR *dirp,
			    long offset)
{
	struct timespec current_mtime;
	struct dirsort_privates *data = NULL;

	SMB_VFS_HANDLE_GET_DATA(handle, data, struct dirsort_privates, return);

	/* Find the entry holding dirp. */
	while(data && (data->source_directory != dirp)) {
		data = data->next;
	}
	if (data == NULL) {
		return;
	}
	if (offset >= data->number_of_entries) {
		return;
	}
	data->pos = offset;

	if (get_sorted_dir_mtime(handle, data, &current_mtime) == false) {
		return;
	}

	if (timespec_compare(&current_mtime, &data->mtime)) {
		/* Directory changed. We must re-read the
		   cache and search for the name that was
		   previously stored at the offset being
		   requested, otherwise after the re-sort
		   we will point to the wrong entry. The
		   OS/2 incremental delete code relies on
		   this. */
		unsigned int i;
		char *wanted_name = talloc_strdup(handle->conn,
					data->directory_list[offset].d_name);
		if (wanted_name == NULL) {
			return;
		}
		SMB_VFS_NEXT_REWINDDIR(handle, data->source_directory);
		open_and_sort_dir(handle, data);
		/* Now search for where we were. */
		data->pos = 0;
		for (i = 0; i < data->number_of_entries; i++) {
			if(strcmp(wanted_name, data->directory_list[i].d_name) == 0) {
				data->pos = i;
				break;
			}
		}
		TALLOC_FREE(wanted_name);
	}
}
Example #17
static __be32
nfsd4_block_proc_layoutcommit(struct inode *inode,
		struct nfsd4_layoutcommit *lcp)
{
	loff_t new_size = lcp->lc_last_wr + 1;
	struct iattr iattr = { .ia_valid = 0 };
	struct iomap *iomaps;
	int nr_iomaps;
	int error;

	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
	if (nr_iomaps < 0)
		return nfserrno(nr_iomaps);

	if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
	    timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
		lcp->lc_mtime = current_fs_time(inode->i_sb);
	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;

	if (new_size > i_size_read(inode)) {
		iattr.ia_valid |= ATTR_SIZE;
		iattr.ia_size = new_size;
	}

	error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
			nr_iomaps, &iattr);
	kfree(iomaps);
	return nfserrno(error);
}

const struct nfsd4_layout_ops bl_layout_ops = {
	/*
	 * Pretend that we send notification to the client.  This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad.  Unfortunately RFC5661 is a complete mess
	 * in this regard, but I filed errata 4119 for this a while ago, and
	 * hopefully the Linux client will eventually start caching deviceids
	 * without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_block_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_block_proc_layoutcommit,
};
Example #18
/* Get current value of timer TIMERID and store it in VALUE.  */
int
timer_gettime (timer_t timerid, struct itimerspec *value)
{
  struct timer_node *timer;
  struct timespec now, expiry;
  int retval = -1, armed = 0, valid;
  clock_t clock = 0;

  pthread_mutex_lock (&__timer_mutex);

  timer = timer_id2ptr (timerid);
  valid = timer_valid (timer);

  if (valid) {
    armed = timer->armed;
    expiry = timer->expirytime;
    clock = timer->clock;
    value->it_interval = timer->value.it_interval;
  }

  pthread_mutex_unlock (&__timer_mutex);

  if (valid)
    {
      if (armed)
	{
	  clock_gettime (clock, &now);
	  if (timespec_compare (&now, &expiry) < 0)
	    timespec_sub (&value->it_value, &expiry, &now);
	  else
	    {
	      value->it_value.tv_sec = 0;
	      value->it_value.tv_nsec = 0;
	    }
	}
      else
	{
	  value->it_value.tv_sec = 0;
	  value->it_value.tv_nsec = 0;
	}

      retval = 0;
    }
  else
    __set_errno (EINVAL);

  return retval;
}
Example #19
File: test.c Project: Walkerks/locks
static void test_sleep(void)
{
  struct timespec ts;
  struct timespec interval;
  struct timespec end_ts;

  interval.tv_sec = 0;
  interval.tv_nsec = NSECS_PER_SECOND / 10;

  /* Calculate current time + 100ms */
  timespec_get(&ts, TIME_UTC);
  timespec_add_nsec(&ts, NSECS_PER_SECOND / 10);

  /* Sleep... */
  thrd_sleep(&interval, NULL);

  timespec_get(&end_ts, TIME_UTC);

  assert(timespec_compare(&ts, &end_ts) <= 0);
}
Example #20
int
__timer_thread_queue_timer (struct thread_node *thread,
			    struct timer_node *insert)
{
  struct list_links *iter;
  int athead = 1;

  for (iter = list_first (&thread->timer_queue);
       iter != list_null (&thread->timer_queue);
        iter = list_next (iter))
    {
      struct timer_node *timer = timer_links2ptr (iter);

      if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
	  break;
      athead = 0;
    }

  list_insbefore (iter, &insert->links);
  return athead;
}
Example #21
static ssize_t command_read(struct file *filp, char __user *buf,
			       size_t count, loff_t *f_pos)
{
	struct es705_priv *es705;
	u32 resp;
	size_t slen;
	int err;
	size_t cnt;

	es705 = (struct es705_priv *)filp->private_data;
	BUG_ON(!es705);
	err = cnt = 0;

	if (timespec_compare(&read_time, &es705->last_resp_time) != 0) {
		resp = es705_resp(es705);
		memcpy(&read_time, &es705->last_resp_time, sizeof(read_time));
		snprintf(readbuf, READBUF_SIZE,
			 "%li.%4li 0x%04hx 0x%04hx\n",
			 read_time.tv_sec, read_time.tv_nsec,
			 resp >> 16, resp & 0xffff);
	}
Example #22
File: timer.c Project: achrafweb/FreeRDP
int FireExpiredTimerQueueTimers(WINPR_TIMER_QUEUE* timerQueue)
{
	struct timespec CurrentTime;
	WINPR_TIMER_QUEUE_TIMER* node;

	if (!timerQueue->activeHead)
		return 0;

	timespec_gettimeofday(&CurrentTime);
	node = timerQueue->activeHead;

	while (node)
	{
		if (timespec_compare(&CurrentTime, &(node->ExpirationTime)) >= 0)
		{
			node->Callback(node->Parameter, TRUE);
			node->FireCount++;
			timerQueue->activeHead = node->next;
			node->next = NULL;

			if (node->Period)
			{
				timespec_add_ms(&(node->ExpirationTime), node->Period);
				InsertTimerQueueTimer(&(timerQueue->activeHead), node);
			}
			else
			{
				InsertTimerQueueTimer(&(timerQueue->inactiveHead), node);
			}

			node = timerQueue->activeHead;
		}
		else
		{
			break;
		}
	}

	return 0;
}
Example #23
static int gfs2_write_inode(struct inode *inode, int sync)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct timespec atime;
	struct gfs2_dinode *di;
	int ret = 0;

	/* Check this is a "normal" inode, etc */
	if (!test_bit(GIF_USER, &ip->i_flags) ||
	    (current->flags & PF_MEMALLOC))
		return 0;
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (ret)
		goto do_flush;
	ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (ret)
		goto do_unlock;
	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		di = (struct gfs2_dinode *)bh->b_data;
		atime.tv_sec = be64_to_cpu(di->di_atime);
		atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
		if (timespec_compare(&inode->i_atime, &atime) > 0) {
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_dinode_out(ip, bh->b_data);
		}
		brelse(bh);
	}
	gfs2_trans_end(sdp);
do_unlock:
	gfs2_glock_dq_uninit(&gh);
do_flush:
	if (sync != 0)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
	return ret;
}
Example #24
File: blocklayout.c Project: Abioy/kasan
static __be32
nfsd4_block_proc_layoutcommit(struct inode *inode,
		struct nfsd4_layoutcommit *lcp)
{
	loff_t new_size = lcp->lc_last_wr + 1;
	struct iattr iattr = { .ia_valid = 0 };
	struct iomap *iomaps;
	int nr_iomaps;
	int error;

	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
	if (nr_iomaps < 0)
		return nfserrno(nr_iomaps);

	if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
	    timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
		lcp->lc_mtime = current_fs_time(inode->i_sb);
	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;

	if (new_size > i_size_read(inode)) {
		iattr.ia_valid |= ATTR_SIZE;
		iattr.ia_size = new_size;
	}

	error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
			nr_iomaps, &iattr);
	kfree(iomaps);
	return nfserrno(error);
}

const struct nfsd4_layout_ops bl_layout_ops = {
	.proc_getdeviceinfo	= nfsd4_block_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_block_proc_layoutcommit,
};
Example #25
File: locking.c Project: rchicoli/samba
bool set_write_time(struct file_id fileid, struct timespec write_time)
{
	struct share_mode_lock *lck;

	DEBUG(5,("set_write_time: %s id=%s\n",
		 timestring(talloc_tos(),
			    convert_timespec_to_time_t(write_time)),
		 file_id_string_tos(&fileid)));

	lck = get_existing_share_mode_lock(talloc_tos(), fileid);
	if (lck == NULL) {
		return False;
	}

	if (timespec_compare(&lck->data->old_write_time, &write_time) != 0) {
		lck->data->modified = True;
		lck->data->old_write_time = write_time;
	}

	TALLOC_FREE(lck);
	return True;
}
Example #26
File: hello.c Project: chowp/mac80211
static void print_inf(int t) {
        int j;
	if(t == W2G){
		printk(KERN_DEBUG "\n-------2.4GHz-------\n");
	}else{
		printk(KERN_DEBUG "-------5.0GHz-------\n");
	}	
        printk(KERN_DEBUG "gamma:%ld->%ld\n",inf_start_timestamp.tv_sec,inf_end_timestamp.tv_sec);
        printk(KERN_DEBUG "OVERALL TRANSMITTING TIME:  %ld.%ld\n",summary[t].overall_extra_time.tv_sec,summary[t].overall_extra_time.tv_nsec);
        printk(KERN_DEBUG "OVERALL BUSYWAIT TIME:      %ld.%ld\n",summary[t].overall_busywait.tv_sec,summary[t].overall_busywait.tv_nsec);
        printk(KERN_DEBUG "OVERALL RATEADAPTION TIME:  %ld.%ld\n",summary[t].rate_adaption_time.tv_sec,summary[t].rate_adaption_time.tv_nsec);
        printk(KERN_EMERG "[wAP]%d,[Neighbor]%d, KB/s\n",(int)summary[t].mine_bytes/(int)(FREQUENT_UPDATE_PERIOD_SECONDS*1000),(int)summary[t].sniffer_bytes/(int)(FREQUENT_UPDATE_PERIOD_SECONDS*1000));
	printk(KERN_DEBUG "\nCS:\n");
        for(j = 0 ; j < CS_NUMBER ; j ++){
                if (cs[t][j].value.tv_nsec == 0)
                        break;
                printk(KERN_DEBUG "%s<->%s:%ld second(s) + %ld nanoseconds\n",ether_sprintf_test(cs[t][j].wlan_src),ether_sprintf_test2(cs[t][j].wlan_dst),cs[t][j].value.tv_sec,cs[t][j].value.tv_nsec);
        }

        printk(KERN_DEBUG "\nHT:%ld second(s) + %ld nanoseconds\n",ht[t].tv_sec,ht[t].tv_nsec);
	printk(KERN_DEBUG "Station:%d\n",summary[t].inf_num);
	int inf = timespec_div(timespec_add(summary[t].overall_busywait,ht[t]),summary[t].overall_extra_time);
	printk(KERN_DEBUG "Wing-summary:%d\n",inf);
	int wing_cs_avg = summary[t].wing/summary[t].mine_packets;
	int wing_ht = timespec_div(ht[t],summary[t].overall_extra_time);
	printk(KERN_DEBUG "Wing-average:%d\n",(wing_cs_avg+wing_ht)/2);
	int delay = (summary[t].overall_extra_time.tv_sec*1000+summary[t].overall_extra_time.tv_nsec/1000000)*1000/summary[t].mine_bytes;
	printk(KERN_DEBUG "Delay-summary:%d ms/KB\n",delay);
	if(t == W5G){
		printk(KERN_DEBUG "-------WING_ENDS-------\n");
	}
	//checkpoint
	struct timespec tmp1,tmp2 ={0};
	tmp1 = timespec_add(summary[t].overall_busywait,ht[t]); 
	if( timespec_compare(&tmp1, &summary[t].overall_extra_time) > 0){
		printk(KERN_DEBUG "Interference exceed 100%%\n");
	}

}
Example #27
File: hello.c Project: chowp/mac80211
struct timespec cal_dmaci_ampdu(int t){
	struct timespec transmit={0},tmp1={0},tmp2={0},difs={0},dmaci={0};
	ampdu[t].th = ampdu[t].tw;
	if (timespec_compare(&ampdu[t].th,&ampdu[t].last_te)<0){
		ampdu[t].th=ampdu[t].last_te;
	}
	transmit = cal_transmit_time(ampdu[t].len*ampdu[t].num,ampdu[t].rate);
        difs.tv_sec =0;
	difs.tv_nsec = CONST_TIME[t]*1000;
	tmp1 = timespec_sub(ampdu[t].te,ampdu[t].th);
	tmp2 = timespec_sub(tmp1,transmit);
	dmaci = timespec_sub(tmp2,difs);
	struct timespec ts_tmp;
	getnstimeofday(&ts_tmp);

	//checkpoint
	if (dmaci.tv_sec < 0 || dmaci.tv_nsec < 0){
		dmaci.tv_sec = 0;
		dmaci.tv_nsec = 0;
	}
	return dmaci;
}
Example #28
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks). */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_links *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
	{
	  struct timespec now;

	  timer = timer_links2ptr (first);

	  /* This assumes that the elements of the list of one thread
	     are all for the same clock.  */
	  clock_gettime (timer->clock, &now);

	  while (1)
	    {
	      /* If the timer is due or overdue, remove it from the queue.
		 If it's a periodic timer, re-compute its new time and
		 requeue it.  Either way, perform the timer expiry. */
	      if (timespec_compare (&now, &timer->expirytime) < 0)
		break;

	      list_unlink_ip (first);

	      if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
		  || timer->value.it_interval.tv_nsec != 0)
		{
		  timespec_add (&timer->expirytime, &now,
				&timer->value.it_interval);
		  __timer_thread_queue_timer (self, timer);
		}

	      thread_expire_timer (self, timer);

	      first = list_first (&self->timer_queue);
	      if (first == list_null (&self->timer_queue))
		break;

	      timer = timer_links2ptr (first);
	    }
	}

      /* If the queue is not empty, wait until the expiry time of the
	 first node.  Otherwise wait indefinitely.  Insertions at the
	 head of the queue must wake up the thread by broadcasting
	 this condition variable.  */
      if (timer != NULL)
	pthread_cond_timedwait (&self->cond, &__timer_mutex,
				&timer->expirytime);
      else
	pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This macro will never be executed since the while loop loops
     forever - but we have to add it for proper nesting.  */
  pthread_cleanup_pop (1);

}
Example #29
static void mm_fmwk_job_scheduler(struct work_struct *work)
{
	mm_job_status_e status = MM_JOB_STATUS_INVALID;
	bool is_hw_busy = false;
	struct dev_job_list *job_list_elem;

	struct mm_core *core_dev = container_of(work, \
					struct mm_core, \
					job_scheduler);
	MM_CORE_HW_IFC *hw_ifc = &core_dev->mm_device;

	if (plist_head_empty(&core_dev->job_list))
		return;

	job_list_elem = plist_first_entry(\
			&(core_dev->job_list), \
			struct dev_job_list, core_list);

	if (mm_core_enable_clock(core_dev))
		goto mm_fmwk_job_scheduler_done;

	is_hw_busy = hw_ifc->mm_get_status(hw_ifc->mm_device_id);
	if (!is_hw_busy) {
		if (job_list_elem->job.size) {

			if (job_list_elem->job.status == MM_JOB_STATUS_READY)
				clean_cnt++;

			if (job_list_elem->job.status == MM_JOB_STATUS_DIRTY) {
				mm_common_cache_clean();
				dirty_cnt++;
				if ((dirty_cnt % 1000) == 0)
					pr_debug("mm jobs dirty=%d, clean=%d\n",
					dirty_cnt, clean_cnt);
			}

			status	= hw_ifc->mm_start_job(\
					hw_ifc->mm_device_id, \
					&job_list_elem->job, 0);
			if (status < MM_JOB_STATUS_SUCCESS) {
				getnstimeofday(&core_dev->sched_time);
				timespec_add_ns(\
				&core_dev->sched_time, \
				hw_ifc->mm_timeout * NSEC_PER_MSEC);
				core_dev->mm_core_idle = false;

				is_hw_busy = true;
				pr_debug("job posted ");

				raw_notifier_call_chain(\
				&core_dev->mm_common->notifier_head, \
				MM_FMWK_NOTIFY_JOB_STARTED, NULL);

				}
			else {
				core_dev->mm_core_idle = true;
				job_list_elem->job.status \
				= MM_JOB_STATUS_SUCCESS;
				mm_common_job_completion(\
					job_list_elem, core_dev);
				SCHEDULER_WORK(core_dev, \
					&core_dev->job_scheduler);
				}
			}
		else {
			job_list_elem->job.status \
				= MM_JOB_STATUS_SUCCESS;
			mm_common_job_completion(\
				job_list_elem, core_dev);
			SCHEDULER_WORK(core_dev, \
				&core_dev->job_scheduler);
			}
		}
	else {
		struct timespec cur_time;
		getnstimeofday(&cur_time);
		if (timespec_compare(&cur_time, &core_dev->sched_time) > 0) {
			pr_err("abort hw ");
				hw_ifc->mm_abort(hw_ifc->mm_device_id, \
				&job_list_elem->job);
			core_dev->mm_core_idle = true;
			is_hw_busy = false;
			SCHEDULER_WORK(core_dev, &core_dev->job_scheduler);
			}
		}

	if (is_hw_busy) {
		mod_timer(&core_dev->dev_timer, \
			jiffies + msecs_to_jiffies(hw_ifc->mm_timer));
		pr_debug("mod_timer  %lx %lx", \
				jiffies, \
				msecs_to_jiffies(hw_ifc->mm_timer));
		return;
		}

mm_fmwk_job_scheduler_done:
	mm_core_disable_clock(core_dev);
}
Example #30
File: reserve.c Project: adwaitnd/erts
long sys_set_reserve(pid_t pid, struct timespec __user *user_C,
                     struct timespec __user *user_T, int cid) {

  struct cpumask set;
  struct timespec T, C, empty;
  struct pid *pid_struct;
  struct task_struct *task;
  struct task_struct *tmp;
  int i;
  int cpu_task_count[] = {0, 0, 0, 0};

  set_normalized_timespec(&empty, 0, 0);

  // locate the task_struct for the task required
  if (pid == 0) {
    task = current;
  } else {
    rcu_read_lock();
    pid_struct = find_get_pid(pid);
    if (!pid_struct) {
      rcu_read_unlock();
      return -ENODEV;
    }
    task = pid_task(pid_struct, PIDTYPE_PID);
    if (!task) {
      rcu_read_unlock();
      return -ENODEV;
    }
    rcu_read_unlock();
  }

  // get timespec struct info
  if (copy_from_user(&C, user_C, sizeof(struct timespec))) {
    printk(KERN_ALERT "[sys_set_reserve] failed to copy C from user\n");
    return -EFAULT;
  }

  if (copy_from_user(&T, user_T, sizeof(struct timespec))) {
    printk(KERN_ALERT "[sys_set_reserve] failed to copy T from user\n");
    return -EFAULT;
  }

  // check for timespec validity
  if ((timespec_compare(&T, &C) < 0) || !timespec_valid(&T) || !timespec_valid(&C) ||
      (cid >= NUM_CPUS)) {
    printk(KERN_ALERT "[sys_set_reserve] invalid T and C\n");
    return -EINVAL;
  }

  // do a reservation admission check
  cid = admission_check(task, C, T, cid);
  if (cid < 0) {
    return -EBUSY;
  }

  if (set_reserve_hook(task) != 0) {
    return -EFAULT;
  }

  // cancel any old timers for an updated reservation
  if (hrtimer_active(&(task->C_timer))) {
    hrtimer_cancel(&(task->C_timer));
  }
  if (hrtimer_active(&(task->T_timer))) {
    hrtimer_cancel(&(task->T_timer));
  }

  // make runnable any task suspended by enforcement
  if (task->put_to_sleep) {
    task->put_to_sleep = 0;
    wake_up_process(task);
  }

  // copy into task struct ktime values
  task->real_C_time = ktime_set(0, 0);
  task->C_time = ktime_set(C.tv_sec, C.tv_nsec);
  task->T_time = ktime_set(T.tv_sec, T.tv_nsec);

  // find what cpus have tasks on them
  rcu_read_lock();
  for_each_process(tmp) {
    if (tmp->has_reservation) {
      cpu_task_count[task_cpu(tmp)] = 1;
    }
  }
  rcu_read_unlock();

  cpu_task_count[cid] = 1;
  task->reserve_cpu = cid;
  // Bring offline all cpus with no tasks
  for (i = 0; i < NUM_CPUS; i ++) {
    if (cpu_task_count[i] == 0) {
      if (power_cpu(i, 0) != 0) {
        printk(KERN_ALERT"[sys_set_reserve] failed to turn off cpu %d\n", i);
        goto fail;
      }
      printk(KERN_ALERT"[sys_set_reserve] turned OFF CPU %d\n", i);
    } else {
      if (power_cpu(i, 1) != 0) {
        printk(KERN_ALERT"[sys_set_reserve] failed to turn on cpu %d\n", i);
        goto fail;
      }
      printk(KERN_ALERT"[sys_set_reserve] turned ON CPU %d\n", i);
    }
  }

  // set process CPU
  cpumask_clear(&set);
  cpumask_set_cpu(cid, &set);
  if (sched_setaffinity(pid, &set)) {
    printk(KERN_ALERT"[sys_set_reserve] failed to set CPU affinity\n");
    goto fail;
  }

  printk(KERN_ALERT "[sys_set_reserve] PID %d (C = %lld ms / T = %lld ms) CPU %u\n",
         pid, ktime_to_ms(task->C_time), ktime_to_ms(task->T_time), cid);

  // mark as having a reservation
  task->has_reservation = 1;

  // set the frequency based on sysclock algorithm
  sysclock_set();

  return 0;

  fail:
    if (task->has_reservation || task->energymon_node) {
      cancel_reserve_hook(task);
    }
    return -EINVAL;
}