Example #1
File: file.c Project: nhanh0/hah
int expand_fd_array(struct files_struct *files, int nr)
{
    struct file **new_fds;
    int error, nfds;


    error = -EMFILE;
    if (files->max_fds >= NR_OPEN || nr >= NR_OPEN)
        goto out;

    nfds = files->max_fds;
    write_unlock(&files->file_lock);

    /*
     * Expand to the max in easy steps, and keep expanding it until
     * we have enough for the requested fd array size.
     */

    do {
#if NR_OPEN_DEFAULT < 256
        if (nfds < 256)
            nfds = 256;
        else
#endif
        if (nfds < (PAGE_SIZE / sizeof(struct file *)))
            nfds = PAGE_SIZE / sizeof(struct file *);
        else {
            nfds = nfds * 2;
            if (nfds > NR_OPEN)
                nfds = NR_OPEN;
        }
    } while (nfds <= nr);

    error = -ENOMEM;
    new_fds = alloc_fd_array(nfds);
    write_lock(&files->file_lock);
    if (!new_fds)
        goto out;

    /* Copy the existing array and install the new pointer */

    if (nfds > files->max_fds) {
        struct file **old_fds;
        int i;

        old_fds = xchg(&files->fd, new_fds);
        i = xchg(&files->max_fds, nfds);

        /* Don't copy/clear the array if we are creating a new
           fd array for fork() */
        if (i) {
            memcpy(new_fds, old_fds, i * sizeof(struct file *));
            /* clear the remainder of the array */
            memset(&new_fds[i], 0,
                   (nfds-i) * sizeof(struct file *));

            write_unlock(&files->file_lock);
            free_fd_array(old_fds, i);
            write_lock(&files->file_lock);
        }
    } else {
        /* Somebody expanded the array while we slept ... */
        write_unlock(&files->file_lock);
        free_fd_array(new_fds, nfds);
        write_lock(&files->file_lock);
    }
    error = 0;
out:
    return error;
}
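The interesting move in Example #1 is that expand_fd_array() drops files->file_lock around the blocking allocation and re-takes it afterwards, so it must recheck max_fds to detect a concurrent expansion ("Somebody expanded the array while we slept"). A minimal userspace sketch of the same drop/allocate/relock/recheck pattern, using POSIX rwlocks and illustrative names rather than anything from the kernel source:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct table {
	pthread_rwlock_t lock;	/* guards fds and max_fds */
	void **fds;
	int max_fds;
};

/* Grow t->fds to more than nr slots. Called with t->lock held for write;
 * returns with it held for write. Returns 0 on success, -1 on ENOMEM. */
static int expand_table(struct table *t, int nr)
{
	void **new_fds;
	int nfds = t->max_fds ? t->max_fds : 16;

	while (nfds <= nr)
		nfds *= 2;

	/* The allocation may block, so do it without holding the lock. */
	pthread_rwlock_unlock(&t->lock);
	new_fds = calloc(nfds, sizeof(*new_fds));
	pthread_rwlock_wrlock(&t->lock);
	if (!new_fds)
		return -1;

	if (nfds > t->max_fds) {
		/* Still the biggest: copy the old slots and install. */
		memcpy(new_fds, t->fds, t->max_fds * sizeof(*new_fds));
		free(t->fds);
		t->fds = new_fds;
		t->max_fds = nfds;
	} else {
		/* Somebody expanded the array while we slept... */
		free(new_fds);
	}
	return 0;
}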
Example #2
void SortedVectorMapRow<V>::ApplyInc(int32_t column_id, const void *update) {
  std::unique_lock<SharedMutex> write_lock(rw_mutex_);
  ApplyIncUnsafe(column_id, update);
}
Example #3
double SortedVectorMapRow<V>::ApplyIncGetImportance(int32_t column_id,
                                                    const void *update) {
  std::unique_lock<SharedMutex> write_lock(rw_mutex_);
  return ApplyIncUnsafeGetImportance(column_id, update);
}
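Examples #2 and #3 are thin exclusive-lock wrappers: the std::unique_lock takes the write side of a reader-writer mutex for the duration of the call, then delegates to an *Unsafe variant that assumes the lock is already held. In C, where no RAII guard releases the lock automatically, the same split might look like this (a sketch with hypothetical names):

#include <pthread.h>

static pthread_rwlock_t row_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Assumes row_lock is already held for writing. */
static void apply_inc_unsafe(int column_id, const void *update)
{
	/* ... mutate the row in place ... */
}

void apply_inc(int column_id, const void *update)
{
	pthread_rwlock_wrlock(&row_lock);	/* exclusive with readers and writers */
	apply_inc_unsafe(column_id, update);
	pthread_rwlock_unlock(&row_lock);	/* manual unlock: no RAII in C */
}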
Example #4
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
Example #5
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    skb->len + ihl > qp->q.max_size)
		qp->q.max_size = skb->len + ihl;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}
Example #6
static ssize_t my_write(struct file *fp, const char __user *buf, size_t len, loff_t *off) {
  write_lock(&rwlock);
  data = (data == 1) ? 0 : 1; // NORACE!
  write_unlock(&rwlock);
  return 0;
}
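The NORACE comment in Example #6 holds because the flag is only ever flipped while the write lock is held; any reader taking the read side of the same rwlock sees either the old or the new value, never a torn or racing update. A userspace pair showing both sides (sketch, illustrative names):

#include <pthread.h>

static pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int data;

void toggle(void)                 /* the write side */
{
	pthread_rwlock_wrlock(&rwlock);
	data = (data == 1) ? 0 : 1;   /* no race: writers are exclusive */
	pthread_rwlock_unlock(&rwlock);
}

int peek(void)                    /* the read side */
{
	pthread_rwlock_rdlock(&rwlock);
	int v = data;
	pthread_rwlock_unlock(&rwlock);
	return v;
}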
Example #7
/**
 * Insert range in fld store.
 *
 *      \param  range  range to be inserted
 *      \param  th     transaction for this operation, as it may be part of
 *                     a compound transaction.
 *
 *      \retval  0    success
 *      \retval  -ve  error
 *
 * The whole fld index insertion is protected by seq->lss_mutex (see
 * seq_server_alloc_super), i.e. only one thread will access fldb at a
 * time, so we need not worry about the fld file and cache being
 * changed between declare and create.
 * Because fld entries can only grow incrementally, we only check
 * whether the new range can be merged from the left.
 *
 * Caller must hold fld->lsf_lock
 **/
int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
		     const struct lu_seq_range *new_range, struct thandle *th)
{
	struct lu_seq_range	*range;
	struct lu_seq_range	*tmp;
	struct fld_thread_info	*info;
	int			rc = 0;
	int			deleted = 0;
	struct fld_cache_entry	*flde;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);

	LASSERT(mutex_is_locked(&fld->lsf_lock));

	range = &info->fti_lrange;
	memset(range, 0, sizeof(*range));
	tmp = &info->fti_irange;
	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc != -ENOENT) {
		rc = rc == 0 ? -EEXIST : rc;
		GOTO(out, rc);
	}

	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    lu_seq_range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_delete(env, fld->lsf_obj,
			       (struct dt_key *)&tmp->lsr_start, th);
		if (rc != 0)
			GOTO(out, rc);
		*tmp = *new_range;
		tmp->lsr_start = range->lsr_start;
		deleted = 1;
	} else {
		*tmp = *new_range;
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
		       (struct dt_key *)&tmp->lsr_start, th, 1);
	if (rc != 0) {
		CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
		       fld->lsf_name, PRANGE(new_range), rc);
		GOTO(out, rc);
	}

	flde = fld_cache_entry_create(new_range);
	if (IS_ERR(flde))
		GOTO(out, rc = PTR_ERR(flde));

	write_lock(&fld->lsf_cache->fci_lock);
	if (deleted)
		fld_cache_delete_nolock(fld->lsf_cache, new_range);
	rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
	write_unlock(&fld->lsf_cache->fci_lock);
	if (rc)
		OBD_FREE_PTR(flde);
out:
	RETURN(rc);
}
Example #8
int powernet_snmp_ups_check_state(UPSINFO *ups)
{
   struct snmp_ups_internal_data *Sid =
      (struct snmp_ups_internal_data *)ups->driver_internal_data;
   fd_set fds;
   int numfds, rc, block;
   struct timeval tmo, exit, now;
   int sleep_time;

   /* Check for commlost under lock since UPS status might be changed */
   write_lock(ups);
   rc = powernet_check_comm_lost(ups);
   write_unlock(ups);
   if (rc == 0)
      return 0;

   sleep_time = ups->wait_time;

   /* If we're not doing SNMP traps, just sleep and exit */
   if (!Sid->trap_session) {
      sleep(sleep_time);
      return 1;
   }

   /* Figure out when we need to exit by */
   gettimeofday(&exit, NULL);
   exit.tv_sec += sleep_time;

   while(1)
   {
      /* Figure out how long until we have to exit */
      gettimeofday(&now, NULL);

      if (now.tv_sec > exit.tv_sec ||
         (now.tv_sec == exit.tv_sec &&
            now.tv_usec >= exit.tv_usec)) {
         /* Done already? How time flies... */
         return 0;
      }

      tmo.tv_sec = exit.tv_sec - now.tv_sec;
      tmo.tv_usec = exit.tv_usec - now.tv_usec;
      if (tmo.tv_usec < 0) {
         tmo.tv_sec--;              /* Normalize */
         tmo.tv_usec += 1000000;
      }

      /* Get select parameters from SNMP library */
      FD_ZERO(&fds);
      block = 0;
      numfds = 0;
      snmp_select_info(&numfds, &fds, &tmo, &block);

      /* Wait for something to happen */
      rc = select(numfds, &fds, NULL, NULL, &tmo);
      switch (rc) {
      case 0:  /* Timeout */
         /* Tell SNMP library about the timeout */
         snmp_timeout();
         break;

      case -1: /* Error */
         if (errno == EINTR || errno == EAGAIN)
            continue;            /* assume SIGCHLD */

         Dmsg1(200, "select error: ERR=%s\n", strerror(errno));
         return 0;

      default: /* Data available */
         /* Reset trap flag and run callback processing */
         Sid->trap_received = false;
         snmp_read(&fds);

         /* If callback processing set the flag, we got a trap */
         if (Sid->trap_received)
            return 1;

         break;
      }
   }
}
Example #9
static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq)
{
	write_lock(&nf_ct_frag6_lock);
	__fq_unlink(fq);
	write_unlock(&nf_ct_frag6_lock);
}
Example #10
void RWMutex::upgrade_read_to_write_lock()
{
  read_unlock();
  write_lock();
}
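Beware that this "upgrade" is not atomic: between read_unlock() and write_lock() another writer can slip in, so whatever was observed under the read lock must be revalidated once the write lock is held (POSIX rwlocks have the same limitation; there is no atomic upgrade operation). A defensive version of the pattern, as a sketch with a hypothetical cache:

#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int cached = -1;	/* -1 means "not computed yet" */

int get_or_compute(int (*compute)(void))
{
	pthread_rwlock_rdlock(&lock);
	int v = cached;
	pthread_rwlock_unlock(&lock);
	if (v != -1)
		return v;	/* fast path: value already present */

	pthread_rwlock_wrlock(&lock);	/* "upgrade" = drop, then reacquire */
	if (cached == -1)	/* revalidate: someone may have beaten us here */
		cached = compute();
	v = cached;
	pthread_rwlock_unlock(&lock);
	return v;
}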
Example #11
static void neigh_expire_timer_check(unsigned long dummy)
{
    int i;
    unsigned long flags;

    save_flags(flags);
    cli();

    write_lock(&clip_tbl.lock);

    for (i = 0; i <= NEIGH_HASHMASK; i++)
    {
	struct neighbour **np;

	for (np = &clip_tbl.hash_buckets[i]; *np;)
	{
	    struct neighbour *n = *np;
	    struct atmarp_entry *entry = NEIGH2ENTRY(n);
	    struct clip_vcc *clip_vcc;
	    int need_resolving = 1;

    	    if (time_before(jiffies + CLIP_ENTRY_EXPIRE / 4, entry->expires))
		need_resolving = 0;

	    for (clip_vcc = entry->vccs; clip_vcc; clip_vcc = clip_vcc->next)
	    {
		if (clip_vcc->idle_timeout && time_after(jiffies, 
		    clip_vcc->last_use + clip_vcc->idle_timeout))
		{
		    DPRINTK("releasing vcc %p->%p of entry %p\n",
			clip_vcc, clip_vcc->vcc, entry);
		    atm_async_release_vcc(clip_vcc->vcc, -ETIMEDOUT);
		}
		else if (need_resolving && !clip_vcc->resolve_timeout)
		    clip_start_resolving(clip_vcc);
	    }

	    if (!need_resolving || time_before(jiffies, entry->expires))
	    {
		np = &n->next;
		continue;
	    }
	    
	    if (atomic_read(&n->refcnt) > 1)
	    {
		struct sk_buff *skb;

		DPRINTK("destruction postponed with ref %d\n",
		    atomic_read(&n->refcnt));
		while ((skb = skb_dequeue(&n->arp_queue)))
		    dev_kfree_skb(skb);
		np = &n->next;
		continue;
	    }
	    *np = n->next;
	    DPRINTK("expired neigh %p\n",n);
	    n->dead = 1;
	    neigh_release(n);
	}
    }
    mod_timer(&neigh_expire_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
    write_unlock(&clip_tbl.lock);
    restore_flags(flags);
}
Example #12
/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;
	service_id = srx->srx_service;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			write_lock(&local->services_lock);
			if (rcu_access_pointer(local->service))
				goto service_in_use;
			rx->local = local;
			rcu_assign_pointer(local->service, rx);
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_put_local(local);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
Example #13
/*
 * Find an empty file descriptor entry, and mark it busy.
 */
int get_unused_fd(void)
{
    struct files_struct * files = current->files;
    int fd, error;

    error = -EMFILE;
    write_lock(&files->file_lock);

repeat:
    fd = find_next_zero_bit(files->open_fds,
                            files->max_fdset,
                            files->next_fd);

    /*
     * N.B. For clone tasks sharing a files structure, this test
     * will limit the total number of files that can be opened.
     */
    if (fd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
        goto out;

    /* Do we need to expand the fdset array? */
    if (fd >= files->max_fdset)
    {
        error = expand_fdset(files, fd);
        if (!error)
        {
            error = -EMFILE;
            goto repeat;
        }
        goto out;
    }

    /*
     * Check whether we need to expand the fd array.
     */
    if (fd >= files->max_fds)
    {
        error = expand_fd_array(files, fd);
        if (!error)
        {
            error = -EMFILE;
            goto repeat;
        }
        goto out;
    }

    FD_SET(fd, files->open_fds);
    FD_CLR(fd, files->close_on_exec);
    files->next_fd = fd + 1;
#if 1
    /* Sanity check */
    if (files->fd[fd] != NULL)
    {
        printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
        files->fd[fd] = NULL;
    }
#endif
    error = fd;

out:
    write_unlock(&files->file_lock);
    return error;
}
Example #14
/*
 * Set the attributes of an object
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	sa_bulk_attr_t		*bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	struct osa_attr		*osa = &info->oti_osa;
	__u64			 valid = la->la_valid;
	int			 cnt;
	int			 rc = 0;

	ENTRY;

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of0(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	   transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	/* Only allow set size for regular file */
	if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
		valid &= ~(LA_SIZE | LA_BLOCKS);

	if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
		valid &= ~LA_CTIME;

	if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
		valid &= ~LA_MTIME;

	if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
		valid &= ~LA_ATIME;

	if (valid == 0)
		GOTO(out, rc = 0);

	if (valid & LA_FLAGS) {
		struct lustre_mdt_attrs *lma;
		struct lu_buf buf;

		if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
			CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
			lma = (struct lustre_mdt_attrs *)&info->oti_buf;
			buf.lb_buf = lma;
			buf.lb_len = sizeof(info->oti_buf);
			rc = osd_xattr_get(env, &obj->oo_dt, &buf,
					   XATTR_NAME_LMA);
			if (rc > 0) {
				lma->lma_incompat =
					le32_to_cpu(lma->lma_incompat);
				lma->lma_incompat |=
					lustre_to_lma_flags(la->la_flags);
				lma->lma_incompat =
					cpu_to_le32(lma->lma_incompat);
				buf.lb_buf = lma;
				buf.lb_len = sizeof(*lma);
				rc = osd_xattr_set_internal(env, obj, &buf,
							    XATTR_NAME_LMA,
							    LU_XATTR_REPLACE,
							    oh);
			}
			if (rc < 0) {
				CWARN("%s: failed to set LMA flags: rc = %d\n",
				       osd->od_svname, rc);
				RETURN(rc);
			}
		}
	}

	/* do both accounting updates outside oo_attr_lock below */
	if ((valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
		/* Update user accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
					la->la_uid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_uid, rc);
		rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
					obj->oo_attr.la_uid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_uid, rc);
	}
	if ((valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
		/* Update group accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
					la->la_gid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname, la->la_gid, rc);
		rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
					obj->oo_attr.la_gid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
				"%d (%d)\n", osd->od_svname,
				obj->oo_attr.la_gid, rc);
	}

	write_lock(&obj->oo_attr_lock);
	cnt = 0;
	if (valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
				 osa->atime, 16);
	}
	if (valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
				 osa->mtime, 16);
	}
	if (valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
				 osa->ctime, 16);
	}
	if (valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
			(la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
				 &osa->mode, 8);
	}
	if (valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
				 &osa->size, 8);
	}
	if (valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
				 &osa->nlink, 8);
	}
	if (valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
				 &osa->rdev, 8);
	}
	if (valid & LA_FLAGS) {
		osa->flags = attrs_fs2zfs(la->la_flags);
		/* many flags are not supported by zfs, so ensure a good cached
		 * copy */
		obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
				 &osa->flags, 8);
	}
	if (valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
				 &osa->uid, 8);
	}
	if (valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= valid;
	write_unlock(&obj->oo_attr_lock);

	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
Example #15
File: miscd.c Project: wyat/kbs
static int miscd_dodaemon(char *argv1, char *daemon)
{
    struct sigaction act;
    char *commandline;
    char commbuf[10];
    char ch;

    if (!check_file_writable(PASSFILE))
    {
        fprintf(stderr, "Error! File %s is not writable.\n", PASSFILE);
        exit(-1);
    }
    if (!check_file_writable(BOARDS))
    {
        fprintf(stderr, "Error! File %s is not writable.\n", BOARDS);
        exit(-1);
    }
    truncate(BOARDS, MAXBOARD * sizeof(struct boardheader));

    if (load_ucache() != 0) {
        printf("ft,load ucache error!");
        exit(-1);
    }

    /* init tmpfs */
    sprintf(genbuf1,"%s/home",TMPFSROOT);
    mkdir(genbuf1,0700);
    sprintf(genbuf1,"%s/boards",TMPFSROOT);
    mkdir(genbuf1,0700);
    for (ch = 'A'; ch <= 'Z'; ch++) {
        sprintf(genbuf1, "%s/home/%c", TMPFSROOT, ch);
        mkdir(genbuf1, 0700);
    }

    resolve_boards();
    resolve_utmp();
    resolve_guest_table();

    if (argv1 != NULL) {
        switch (fork()) {
        case -1:
            printf("faint, i can't fork.\n");
            exit(0);
            break;
        case 0:
            break;
        default:
            exit(0);
            break;
        }
        commandline = argv1;
    } else {
        commandline = commbuf;
    }
    setsid();
#if defined(FREEBSD) || defined(MACOSX)
    setpgid(0, 0);
#else
    // by zixia setpgrp(0, 0);
    setpgrp();
#endif
#ifdef AIX
    act.sa_handler = NULL;
    act.sa_flags = SA_RESTART | SA_NOCLDWAIT;
    sigaction(SIGCHLD, &act, NULL);
#else
    act.sa_handler = reaper;
    act.sa_flags = SA_RESTART;
    sigaction(SIGCHLD, &act, NULL);
#endif
    if (((daemon == NULL) || (!strcmp(daemon, "timed"))) && ((argv1 == NULL) || fork())) {
        strcpy(commandline, "timed");
        timed();
        exit(0);
    }

    if (((daemon == NULL) || (!strcmp(daemon, "killd"))) && ((argv1 == NULL) || fork())) {
        strcpy(commandline, "killd");
        char line[20];
        const char *path = "var/killd.pid";
        int pidfd = open(path, O_RDWR | O_CREAT, 0660);
        if (write_lock(pidfd, 0, SEEK_SET, 0) < 0) {
            bbslog("3error", "killd had already been started!");
            exit(-1);
        }
        snprintf(line, sizeof(line), "%ld\n", (long)getpid());
        ftruncate(pidfd, 0);
        write(pidfd, line, strlen(line));

        while (1) {
            time_t ft;

            ft = getnextday4am();
            do {
                sleep(ft - time(0));
            } while (ft > time(0));

            if (argv1 == NULL) {
                dokilluser();
                //doupdategiveupuser();
            } else {
                switch (fork()) {
                case -1:
                    bbslog("3error", "fork failed\n");
                    break;
                case 0:
                    dokilluser();
                    //doupdategiveupuser();
                    exit(0);
                    break;
                default:
                    break;
                }
            }
            switch (fork()) {
                case -1:
                    bbslog("3error", "fork failed\n");
                    break;
                case 0:
                    dokillalldir();
                    exit(0);
                    break;
                default:
                    break;
            }
        };
        exit(0);
    }
    if (((daemon == NULL) || (!strcmp(daemon, "userd"))) && ((argv1 == NULL) || fork())) {
        strcpy(commandline, "userd");
        userd();
        exit(0);
    }
    if ((daemon == NULL) || (!strcmp(daemon, "flushd"))) {
        strcpy(commandline, "flushd");
        flushd();
        exit(0);
    }
    return 0;
}
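Unlike the other examples, the write_lock(pidfd, ...) call in Example #15 takes a file descriptor plus offset arguments, so it is presumably a wrapper around fcntl record locking (in the style of the write_lock macro from Stevens' APUE) rather than a kernel rwlock; holding an exclusive lock on the pidfile is what makes the "killd had already been started" check work. A sketch of such a helper, assuming POSIX fcntl:

#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

/* Try to place an exclusive (write) record lock on [offset, offset+len).
 * Non-blocking: returns -1 with errno set if another process holds it. */
int write_lock(int fd, off_t offset, int whence, off_t len)
{
	struct flock fl;

	fl.l_type = F_WRLCK;	/* exclusive lock */
	fl.l_start = offset;
	fl.l_whence = whence;	/* SEEK_SET, SEEK_CUR or SEEK_END */
	fl.l_len = len;		/* 0 means "through end of file" */
	return fcntl(fd, F_SETLK, &fl);
}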
Example #16
static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
			     struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	int offset, end;

	if (fq->last_in & COMPLETE) {
		DEBUGP("Already completed\n");
		goto err;
	}

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		DEBUGP("offset is too large.\n");
 		return -1;
	}

 	if (skb->ip_summed == CHECKSUM_HW)
 		skb->csum = csum_sub(skb->csum,
 				     csum_partial(skb->nh.raw,
						  (u8*)(fhdr + 1) - skb->nh.raw,
						  0));

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->len ||
		    ((fq->last_in & LAST_IN) && end != fq->len)) {
			DEBUGP("already received last fragment\n");
			goto err;
		}
		fq->last_in |= LAST_IN;
		fq->len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			DEBUGP("the end of this fragment is not rounded to 8 bytes.\n");
			return -1;
		}
		if (end > fq->len) {
			/* Some bits beyond end -> corruption. */
			if (fq->last_in & LAST_IN) {
				DEBUGP("last packet already reached.\n");
				goto err;
			}
			fq->len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		DEBUGP("queue: message is too short.\n");
		goto err;
	}
	if (end-offset < skb->len) {
		if (pskb_trim(skb, end - offset)) {
			DEBUGP("Can't trim\n");
			goto err;
		}
		if (skb->ip_summed != CHECKSUM_UNNECESSARY)
			skb->ip_summed = CHECKSUM_NONE;
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->fragments; next != NULL; next = next->next) {
		if (NFCT_FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset) {
				DEBUGP("overlap\n");
				goto err;
			}
			if (!pskb_pull(skb, i)) {
				DEBUGP("Can't pull\n");
				goto err;
			}
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && NFCT_FRAG6_CB(next)->offset < end) {
		/* overlap is 'i' bytes */
		int i = end - NFCT_FRAG6_CB(next)->offset;

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			DEBUGP("Eat head of the overlapped parts.: %d", i);
			if (!pskb_pull(next, i))
				goto err;

			/* next fragment */
			NFCT_FRAG6_CB(next)->offset += i;
			fq->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->fragments = next;

			fq->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	NFCT_FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->fragments = skb;

	skb->dev = NULL;
	skb_get_timestamp(skb, &fq->stamp);
	fq->meat += skb->len;
	atomic_add(skb->truesize, &nf_ct_frag6_mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->last_in |= FIRST_IN;
	}
	write_lock(&nf_ct_frag6_lock);
	list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
	write_unlock(&nf_ct_frag6_lock);
	return 0;

err:
	return -1;
}
Example #17
void JSONEnumService::update_enum()
{
  // Check whether the file exists.
  struct stat s;
  if ((stat(_configuration.c_str(), &s) != 0) &&
      (errno == ENOENT))
  {
    TRC_STATUS("No ENUM configuration (file %s does not exist)",
               _configuration.c_str());
    CL_SPROUT_ENUM_FILE_MISSING.log(_configuration.c_str());
    return;
  }

  TRC_STATUS("Loading ENUM configuration from %s", _configuration.c_str());

  // Read from the file
  std::ifstream fs(_configuration.c_str());
  std::string enum_str((std::istreambuf_iterator<char>(fs)),
                        std::istreambuf_iterator<char>());

  if (enum_str == "")
  {
    // LCOV_EXCL_START
    TRC_ERROR("Failed to read ENUM configuration data from %s",
              _configuration.c_str());
    CL_SPROUT_ENUM_FILE_EMPTY.log(_configuration.c_str());
    return;
    // LCOV_EXCL_STOP
  }

  // Now parse the document
  rapidjson::Document doc;
  doc.Parse<0>(enum_str.c_str());

  if (doc.HasParseError())
  {
    TRC_ERROR("Failed to read ENUM configuration data: %s\nError: %s",
              enum_str.c_str(),
              rapidjson::GetParseError_En(doc.GetParseError()));
    CL_SPROUT_ENUM_FILE_INVALID.log(_configuration.c_str());
    return;
  }

  try
  {
    std::vector<NumberPrefix> new_number_prefixes;

    JSON_ASSERT_CONTAINS(doc, "number_blocks");
    JSON_ASSERT_ARRAY(doc["number_blocks"]);
    const rapidjson::Value& nb_arr = doc["number_blocks"];

    for (rapidjson::Value::ConstValueIterator nb_it = nb_arr.Begin();
         nb_it != nb_arr.End();
         ++nb_it)
    {
      try
      {
        std::string prefix;
        JSON_GET_STRING_MEMBER(*nb_it, "prefix", prefix);
        std::string regex;
        JSON_GET_STRING_MEMBER(*nb_it, "regex", regex);

        // Entry is well-formed, so add it.
        TRC_DEBUG("Found valid number prefix block %s", prefix.c_str());
        NumberPrefix pfix;
        pfix.prefix = prefix;

        if (parse_regex_replace(regex, pfix.match, pfix.replace))
        {
          new_number_prefixes.push_back(pfix);
          TRC_STATUS("  Adding number prefix %s, regex=%s",
                     pfix.prefix.c_str(), regex.c_str());
        }
        else
        {
          TRC_WARNING("Badly formed regular expression in ENUM number block %s",
                      regex.c_str());
        }
      }
      catch (JsonFormatError err)
      {
        // Badly formed number block.
        TRC_WARNING("Badly formed ENUM number block (hit error at %s:%d)",
                    err._file, err._line);
        CL_SPROUT_ENUM_FILE_INVALID.log(_configuration.c_str());
      }
    }

    // Take a write lock on the mutex in RAII style
    boost::lock_guard<boost::shared_mutex> write_lock(_number_prefixes_rw_lock);
    _number_prefixes = new_number_prefixes;
  }
  catch (JsonFormatError err)
  {
    TRC_ERROR("Badly formed ENUM configuration data - missing number_blocks object");
    CL_SPROUT_ENUM_FILE_INVALID.log(_configuration.c_str());
  }
}
Example #18
static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	tid_t		tid;
	int		needed, need_to_start;
	int		nblocks = handle->h_buffer_credits;
	unsigned long ts = jiffies;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
		       current->comm, nblocks,
		       journal->j_max_transaction_buffers);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kmem_cache_alloc(transaction_cache,
						   gfp_mask | __GFP_ZERO);
		if (!new_transaction) {
			if ((gfp_mask & __GFP_FS) == 0) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto alloc_transaction;
			}
			return -ENOMEM;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	
	if (journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    !journal->j_barrier_count) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked,
					&wait, TASK_UNINTERRUPTIBLE);
		read_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	needed = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (needed > journal->j_max_transaction_buffers) {
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
				TASK_UNINTERRUPTIBLE);
		tid = transaction->t_tid;
		need_to_start = !tid_geq(journal->j_commit_request, tid);
		read_unlock(&journal->j_state_lock);
		if (need_to_start)
			jbd2_log_start_commit(journal, tid);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}


	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		write_lock(&journal->j_state_lock);
		if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  __jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
	jbd2_journal_free_transaction(new_transaction);
	return 0;
}
Example #19
unsigned long hp_sdc_put(void)
{
	hp_sdc_transaction *curr;
	uint8_t act;
	int idx, curridx;

	int limit = 0;

	write_lock(&hp_sdc.lock);

	/* If i8042 buffers are full, we cannot do anything that
	   requires output, so we skip to the administrativa. */
	if (hp_sdc.ibf) {
		hp_sdc_status_in8();
		if (hp_sdc.ibf)
			goto finish;
	}

 anew:
	/* See if we are in the middle of a sequence. */
	if (hp_sdc.wcurr < 0)
		hp_sdc.wcurr = 0;
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr == hp_sdc.wcurr)
		hp_sdc.wcurr++;
	read_unlock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
		hp_sdc.wcurr = 0;
	curridx = hp_sdc.wcurr;

	if (hp_sdc.tq[curridx] != NULL)
		goto start;

	while (++curridx != hp_sdc.wcurr) {
		if (curridx >= HP_SDC_QUEUE_LEN) {
			curridx = -1; /* Wrap to top */
			continue;
		}
		read_lock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.rcurr == curridx) {
			read_unlock_irq(&hp_sdc.rtq_lock);
			continue;
		}
		read_unlock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.tq[curridx] != NULL)
			break; /* Found one. */
	}
	if (curridx == hp_sdc.wcurr) { /* There's nothing queued to do. */
		curridx = -1;
	}
	hp_sdc.wcurr = curridx;

 start:

	/* Check to see if the interrupt mask needs to be set. */
	if (hp_sdc.set_im) {
		hp_sdc_status_out8(hp_sdc.im | HP_SDC_CMD_SET_IM);
		hp_sdc.set_im = 0;
		goto finish;
	}

	if (hp_sdc.wcurr == -1)
		goto done;

	curr = hp_sdc.tq[curridx];
	idx = curr->actidx;

	if (curr->actidx >= curr->endidx) {
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
			hp_sdc.wcurr = 0;
		goto finish;
	}

	act = curr->seq[idx];
	idx++;

	if (curr->idx >= curr->endidx) {
		if (act & HP_SDC_ACT_DEALLOC)
			kfree(curr);
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
			hp_sdc.wcurr = 0;
		goto finish;
	}

	while (act & HP_SDC_ACT_PRECMD) {
		if (curr->idx != idx) {
			idx++;
			act &= ~HP_SDC_ACT_PRECMD;
			break;
		}
		hp_sdc_status_out8(curr->seq[idx]);
		curr->idx++;
		/* act finished? */
		if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_PRECMD)
			goto actdone;
		/* skip quantity field if data-out sequence follows. */
		if (act & HP_SDC_ACT_DATAOUT)
			curr->idx++;
		goto finish;
	}
	if (act & HP_SDC_ACT_DATAOUT) {
		int qty;

		qty = curr->seq[idx];
		idx++;
		if (curr->idx - idx < qty) {
			hp_sdc_data_out8(curr->seq[curr->idx]);
			curr->idx++;
			/* act finished? */
			if (curr->idx - idx >= qty &&
			    (act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAOUT)
				goto actdone;
			goto finish;
		}
		idx += qty;
		act &= ~HP_SDC_ACT_DATAOUT;
	} else
	    while (act & HP_SDC_ACT_DATAREG) {
		int mask;
		uint8_t w7[4];

		mask = curr->seq[idx];
		if (idx != curr->idx) {
			idx++;
			idx += !!(mask & 1);
			idx += !!(mask & 2);
			idx += !!(mask & 4);
			idx += !!(mask & 8);
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0];
		w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1];
		w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2];
		w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3];

		if (hp_sdc.wi > 0x73 || hp_sdc.wi < 0x70 ||
		    w7[hp_sdc.wi - 0x70] == hp_sdc.r7[hp_sdc.wi - 0x70]) {
			int i = 0;

			/* Need to point the write index register */
			while (i < 4 && w7[i] == hp_sdc.r7[i])
				i++;

			if (i < 4) {
				hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i);
				hp_sdc.wi = 0x70 + i;
				goto finish;
			}

			idx++;
			if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG)
				goto actdone;

			curr->idx = idx;
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]);
		hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70];
		hp_sdc.wi++; /* write index register autoincrements */
		{
			int i = 0;

			while ((i < 4) && w7[i] == hp_sdc.r7[i])
				i++;
			if (i >= 4) {
				curr->idx = idx + 1;
				if ((act & HP_SDC_ACT_DURING) ==
				    HP_SDC_ACT_DATAREG)
					goto actdone;
			}
		}
		goto finish;
	}
	/* We don't go any further in the command if there is a pending read,
	   because we don't want interleaved results. */
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr >= 0) {
		read_unlock_irq(&hp_sdc.rtq_lock);
		goto finish;
	}
	read_unlock_irq(&hp_sdc.rtq_lock);


	if (act & HP_SDC_ACT_POSTCMD) {
		uint8_t postcmd;

		/* curr->idx should == idx at this point. */
		postcmd = curr->seq[idx];
		curr->idx++;
		if (act & HP_SDC_ACT_DATAIN) {

			/* Start a new read */
			hp_sdc.rqty = curr->seq[curr->idx];
			do_gettimeofday(&hp_sdc.rtv);
			curr->idx++;
			/* Still need to lock here in case of spurious irq. */
			write_lock_irq(&hp_sdc.rtq_lock);
			hp_sdc.rcurr = curridx;
			write_unlock_irq(&hp_sdc.rtq_lock);
			hp_sdc_status_out8(postcmd);
			goto finish;
		}
		hp_sdc_status_out8(postcmd);
		goto actdone;
	}

 actdone:
	if (act & HP_SDC_ACT_SEMAPHORE)
		up(curr->act.semaphore);
	else if (act & HP_SDC_ACT_CALLBACK)
		curr->act.irqhook(0,NULL,0,0);

	if (curr->idx >= curr->endidx) { /* This transaction is over. */
		if (act & HP_SDC_ACT_DEALLOC)
			kfree(curr);
		hp_sdc.tq[curridx] = NULL;
	} else {
		curr->actidx = idx + 1;
		curr->idx = idx + 2;
	}
	/* Interleave outbound data between the transactions. */
	hp_sdc.wcurr++;
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
		hp_sdc.wcurr = 0;

 finish:
	/* If by some quirk IBF has cleared and our ISR has run to
	   see that that has happened, do it all again. */
	if (!hp_sdc.ibf && limit++ < 20)
		goto anew;

 done:
	if (hp_sdc.wcurr >= 0)
		tasklet_schedule(&hp_sdc.task);
	write_unlock(&hp_sdc.lock);

	return 0;
}
Example #20
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;
	int ret;

	BUFFER_TRACE(bh, "entry");


	if (!buffer_jbd(bh))
		goto zap_buffer_unlocked;

	
	write_lock(&journal->j_state_lock);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_no_jh;

	transaction = jh->b_transaction;
	if (transaction == NULL) {
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			ret = __dispose_buffer(jh,
					journal->j_running_transaction);
			jbd2_journal_put_journal_head(jh);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			write_unlock(&journal->j_state_lock);
			return ret;
		} else {
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				ret = __dispose_buffer(jh,
					journal->j_committing_transaction);
				jbd2_journal_put_journal_head(jh);
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				write_unlock(&journal->j_state_lock);
				return ret;
			} else {
				clear_buffer_jbddirty(bh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		set_buffer_freed(bh);
		if (journal->j_running_transaction && buffer_jbddirty(bh))
			jh->b_next_transaction = journal->j_running_transaction;
		jbd2_journal_put_journal_head(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		write_unlock(&journal->j_state_lock);
		return 0;
	} else {
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	bh->b_bdev = NULL;
	return may_free;
}
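journal_unmap_buffer() acquires its three locks in one fixed order (j_state_lock, then the buffer-state lock, then j_list_lock) and every exit path releases them in exactly the reverse order; a single global acquisition order like this is what rules out ABBA deadlocks between code paths that need several of the locks at once. The discipline in miniature, with hypothetical pthreads locks:

#include <pthread.h>

static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  list_lock  = PTHREAD_MUTEX_INITIALIZER;

void update_both(void (*mutate)(void))
{
	/* Global order: state_lock before list_lock, always. */
	pthread_rwlock_wrlock(&state_lock);
	pthread_mutex_lock(&list_lock);

	mutate();

	/* Release in the reverse order of acquisition. */
	pthread_mutex_unlock(&list_lock);
	pthread_rwlock_unlock(&state_lock);
}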
Example #21
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			write_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			write_unlock(&fs->lock);
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
Example #22
static inline void action_lock_lock(void)
{
	lockdep_off();
	write_lock(&krg_action_lock);
}
Example #23
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
	
	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;
		
		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */
			
		cp = & current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp= & ch->next, ch= *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds()
			    && ch->last_refresh >= current_detail->flush_time
				)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (atomic_read(&ch->refcnt) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index ++;
		spin_unlock(&cache_list_lock);
		if (ch)
			d->cache_put(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
Example #24
/************************* Interrupt context ******************************/
static void hp_sdc_mlc_isr (int irq, void *dev_id,
			    uint8_t status, uint8_t data)
{
	int idx;
	hil_mlc *mlc = &hp_sdc_mlc;

	write_lock(&mlc->lock);
	if (mlc->icount < 0) {
		printk(KERN_WARNING PREFIX "HIL Overflow!\n");
		up(&mlc->isem);
		goto out;
	}
	idx = 15 - mlc->icount;
	if ((status & HP_SDC_STATUS_IRQMASK) == HP_SDC_STATUS_HILDATA) {
		mlc->ipacket[idx] |= data | HIL_ERR_INT;
		mlc->icount--;
		if (hp_sdc_mlc_priv.got5x || !idx)
			goto check;
		if ((mlc->ipacket[idx - 1] & HIL_PKT_ADDR_MASK) !=
		    (mlc->ipacket[idx] & HIL_PKT_ADDR_MASK)) {
			mlc->ipacket[idx] &= ~HIL_PKT_ADDR_MASK;
			mlc->ipacket[idx] |= (mlc->ipacket[idx - 1]
						& HIL_PKT_ADDR_MASK);
		}
		goto check;
	}
	/* We know status is 5X */
	if (data & HP_SDC_HIL_ISERR)
		goto err;
	mlc->ipacket[idx] =
		(data & HP_SDC_HIL_R1MASK) << HIL_PKT_ADDR_SHIFT;
	hp_sdc_mlc_priv.got5x = 1;
	goto out;

 check:
	hp_sdc_mlc_priv.got5x = 0;
	if (mlc->imatch == 0)
		goto done;
	if ((mlc->imatch == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL))
	    && (mlc->ipacket[idx] == (mlc->imatch | idx)))
		goto done;
	if (mlc->ipacket[idx] == mlc->imatch)
		goto done;
	goto out;

 err:
	printk(KERN_DEBUG PREFIX "err code %x\n", data);

	switch (data) {
	case HP_SDC_HIL_RC_DONE:
		printk(KERN_WARNING PREFIX "Bastard SDC reconfigured loop!\n");
		break;

	case HP_SDC_HIL_ERR:
		mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_PERR |
					HIL_ERR_FERR | HIL_ERR_FOF;
		break;

	case HP_SDC_HIL_TO:
		mlc->ipacket[idx] |= HIL_ERR_INT | HIL_ERR_LERR;
		break;

	case HP_SDC_HIL_RC:
		printk(KERN_WARNING PREFIX "Bastard SDC decided to reconfigure loop!\n");
		break;

	default:
		printk(KERN_WARNING PREFIX "Unknown HIL Error status (%x)!\n", data);
		break;
	}

	/* No more data will be coming due to an error. */
 done:
	tasklet_schedule(mlc->tasklet);
	up(&mlc->isem);
 out:
	write_unlock(&mlc->lock);
}
Example #25
inline void xp_write_lock(struct xp_rwlock *rwlock)
{
	write_lock(&(rwlock->rwlock));
}
Example #26
int
cifs_create(struct inode *inode, struct dentry *direntry, int mode,
		struct nameidata *nd)
{
	int rc = -ENOENT;
	int xid;
	int oplock = 0;
	int desiredAccess = GENERIC_READ | GENERIC_WRITE;
	__u16 fileHandle;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	char *full_path = NULL;
	FILE_ALL_INFO * buf = NULL;
	struct inode *newinode = NULL;
	struct cifsFileInfo * pCifsFile = NULL;
	struct cifsInodeInfo * pCifsInode;
	int disposition = FILE_OVERWRITE_IF;
	int write_only = FALSE;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	down(&direntry->d_sb->s_vfs_rename_sem);
	full_path = build_path_from_dentry(direntry);
	up(&direntry->d_sb->s_vfs_rename_sem);
	if(full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}

	if(nd) {
		if ((nd->intent.open.flags & O_ACCMODE) == O_RDONLY)
			desiredAccess = GENERIC_READ;
		else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY) {
			desiredAccess = GENERIC_WRITE;
			write_only = TRUE;
		} else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) {
			/* GENERIC_ALL is too much permission to request */
			/* can cause unnecessary access denied on create */
			/* desiredAccess = GENERIC_ALL; */
			desiredAccess = GENERIC_READ | GENERIC_WRITE;
		}

		if((nd->intent.open.flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
			disposition = FILE_CREATE;
		else if((nd->intent.open.flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
			disposition = FILE_OVERWRITE_IF;
		else if((nd->intent.open.flags & O_CREAT) == O_CREAT)
			disposition = FILE_OPEN_IF;
		else {
			cFYI(1,("Create flag not set in create function"));
		}
	}

	/* BB add processing to set equivalent of mode - e.g. via CreateX with ACLs */
	if (oplockEnabled)
		oplock = REQ_OPLOCK;

	buf = kmalloc(sizeof(FILE_ALL_INFO),GFP_KERNEL);
	if(buf == NULL) {
		kfree(full_path);
		FreeXid(xid);
		return -ENOMEM;
	}

	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR,
			 &fileHandle, &oplock, buf, cifs_sb->local_nls);
	if (rc) {
		cFYI(1, ("cifs_create returned 0x%x ", rc));
	} else {
		/* If Open reported that we actually created a file
		then we now have to set the mode if possible */
		if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
			(oplock & CIFS_CREATE_ACTION))
			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
				CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
					(__u64)current->euid,
					(__u64)current->egid,
					0 /* dev */,
					cifs_sb->local_nls);
			} else {
				CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
					(__u64)-1,
					(__u64)-1,
					0 /* dev */,
					cifs_sb->local_nls);
			}
		else {
			/* BB implement via Windows security descriptors */
			/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
			/* could set r/o dos attribute if mode & 0222 == 0 */
		}

	/* BB server might mask mode so we have to query for Unix case*/
		if (pTcon->ses->capabilities & CAP_UNIX)
			rc = cifs_get_inode_info_unix(&newinode, full_path,
						 inode->i_sb,xid);
		else {
			rc = cifs_get_inode_info(&newinode, full_path,
						 buf, inode->i_sb,xid);
			if(newinode)
				newinode->i_mode = mode;
		}

		if (rc != 0) {
			cFYI(1,("Create worked but get_inode_info failed with rc = %d",
			      rc));
		} else {
			direntry->d_op = &cifs_dentry_ops;
			d_instantiate(direntry, newinode);
		}
		if ((nd == NULL) || ((nd->flags & LOOKUP_OPEN) == 0)) {
			/* mknod case - do not leave file open */
			CIFSSMBClose(xid, pTcon, fileHandle);
		} else if(newinode) {
			pCifsFile = (struct cifsFileInfo *)
			   kmalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
		
			if (pCifsFile) {
				memset((char *)pCifsFile, 0,
				       sizeof (struct cifsFileInfo));
				pCifsFile->netfid = fileHandle;
				pCifsFile->pid = current->tgid;
				pCifsFile->pInode = newinode;
				pCifsFile->invalidHandle = FALSE;
				pCifsFile->closePend     = FALSE;
				init_MUTEX(&pCifsFile->fh_sem);
				/* put the following in at open now */
				/* pCifsFile->pfile = file; */ 
				write_lock(&GlobalSMBSeslock);
				list_add(&pCifsFile->tlist,&pTcon->openFileList);
				pCifsInode = CIFS_I(newinode);
				if(pCifsInode) {
				/* if readable file instance put first in list*/
					if (write_only == TRUE) {
						list_add_tail(&pCifsFile->flist,
							&pCifsInode->openFileList);
					} else {
						list_add(&pCifsFile->flist,
							&pCifsInode->openFileList);
					}
					if((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
						pCifsInode->clientCanCacheAll = TRUE;
						pCifsInode->clientCanCacheRead = TRUE;
						cFYI(1,("Exclusive Oplock granted on inode %p",
							newinode));
					} else if((oplock & 0xF) == OPLOCK_READ)
						pCifsInode->clientCanCacheRead = TRUE;
				}
				write_unlock(&GlobalSMBSeslock);
			}
		}
	} 

	if (buf)
	    kfree(buf);
	if (full_path)
	    kfree(full_path);
	FreeXid(xid);

	return rc;
}
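cifs_create() links the new cifsFileInfo into both the per-tcon and per-inode open-file lists while holding GlobalSMBSeslock for write; the close path has to unlink it under the same lock, roughly as in this hedged sketch (not code from this listing):

	/* Sketch: tear down what cifs_create() installed. */
	write_lock(&GlobalSMBSeslock);
	list_del(&pCifsFile->tlist);
	list_del(&pCifsFile->flist);
	write_unlock(&GlobalSMBSeslock);
	kfree(pCifsFile);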
void SortedVectorMapRow<V>::ApplyBatchInc(const int32_t *column_ids,
    const void* updates, int32_t num_updates) {
  std::unique_lock<SharedMutex> write_lock(rw_mutex_);
  ApplyBatchIncUnsafe(column_ids, updates, num_updates);
}
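Design note: these wrappers take a std::unique_lock on the row's shared mutex, so a batch update excludes concurrent readers as well as other writers; the *Unsafe variants they delegate to assume the lock is already held.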
asmlinkage int sys_setProcessBudget(pid_t pid, unsigned long budget, struct timespec period) {

    struct task_struct * curr;    
    struct task_struct * temp;
    unsigned long temp_time;
    struct cpufreq_policy * lastcpupolicy = cpufreq_cpu_get(0);
    unsigned long max_frequency;
    unsigned long sysclock_freq = 0;
    struct timespec task_budget;
    unsigned int ret_freq = 0, temp_freq;
    int ret_val;
    ktime_t p;
    struct task_ct_struct * list;
    
    //Error checks for input arguments
    //cpufreq_cpu_get() can return NULL, so check before dereferencing
    if (lastcpupolicy == NULL) {
	printk("Could not get cpufreq policy for CPU 0\n");
	return -ENODEV;
    }
    max_frequency = (lastcpupolicy->cpuinfo).max_freq / 1000; //Getting MAX frequency in MHz

    if (!((period.tv_sec > 0) || (period.tv_nsec > 0))) {
	printk("Invalid time period\n");
	return -EINVAL;
    }

    if (pid <= 0) {
	printk("Invalid PID\n");
	return -EINVAL;
    }

    //checking admission
    write_lock(&tasklist_lock);

    //Budget in ns = budget cycles / max CPU frequency in MHz, scaled by 1000;
    //multiply before dividing so integer division does not discard precision
    temp_time = (budget * 1000) / max_frequency;
    task_budget = ns_to_timespec(temp_time);

    printk("Time in ns = %lu and frequency = %lu Mhz\n",temp_time,max_frequency);
    printk("Taks struct budget %ldsec and %ldnsec\n",task_budget.tv_sec,task_budget.tv_nsec);
    
    if(timespec_compare(&task_budget, &period) >= 0){
    	printk("Budget >= Period\n");
	write_unlock(&tasklist_lock);
    	return -EINVAL;
    }
    
    //We need to do it only in the case when we are running tasks without bin packing
    if(is_bin_packing_set == 0){
	if(check_admission(task_budget, period) == 0){
	    printk("Cant add task to the taskset\n");
	    write_unlock(&tasklist_lock);
	    return -EPERM;
	} 
    }

    //Finding task struct given its pid
    curr = (struct task_struct *) find_task_by_vpid(pid);
    if(curr == NULL){
	printk("Couldn't find task\n");
	write_unlock(&tasklist_lock);
	return -ESRCH;
    }

    //If the task already had budget we are essentially changing the budget
    //So the task should go in a new place in the global list.
    //Thus, we delete the task and then reinsert it in the list.
    if(curr->is_budget_set == 1){
	del_periodic_task(&(curr->periodic_task));
    }

    //First check if a period timer already exists from a previous invocation of this syscall.
    //If yes then we cancel it.
    //If hrtimer_cancel() returns 0 or 1 then the timer was successfully cancelled
    if (((curr -> time_period).tv_sec > 0) || ((curr -> time_period).tv_nsec > 0)) {
	hrtimer_cancel(&(curr->period_timer));
    }
    //If the timer is being initialized for the first time then
    //initialize it, set the callback, and set the expiry period to the budget
    else {
	hrtimer_init(&(curr->period_timer),CLOCK_MONOTONIC,HRTIMER_MODE_REL);
	(curr->period_timer).function = &period_timer_callback;
    }

    //First check if a budget timer already exists from a previous invocation of this syscall.
    //If yes then we cancel it.
    //If hrtimer_cancel() returns 0 or 1 then the timer was successfully cancelled
    if (((curr -> budget_time).tv_sec > 0) || ((curr -> budget_time).tv_nsec > 0)) {
	hrtimer_cancel(&(curr->budget_timer));
    }
    //If the timer is being initialized for the first time then
    //initialize it, set the callback, and set the expiry period to the budget
    else {
	hrtimer_init(&(curr->budget_timer),CLOCK_MONOTONIC,HRTIMER_MODE_REL);
	(curr->budget_timer).function = &budget_timer_callback;
    }

    //Setting flag
    if(is_bin_packing_set == 0){
	curr->is_budget_set = 1;
	temp = curr;
	do {
	    //Setting flag
	    temp->is_budget_set = 1;
	}while_each_thread(curr,temp);
    }
double SortedVectorMapRow<V>::ApplyBatchIncGetImportance(
    const int32_t *column_ids,
    const void* updates, int32_t num_updates) {
  std::unique_lock<SharedMutex> write_lock(rw_mutex_);
  return ApplyBatchIncUnsafeGetImportance(column_ids, updates, num_updates);
}
Example #30
0
File: file.c Project: nhanh0/hah
/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
int expand_fdset(struct files_struct *files, int nr)
{
    fd_set *new_openset = NULL, *new_execset = NULL;
    int error, nfds = 0;

    error = -EMFILE;
    if (files->max_fdset >= NR_OPEN || nr >= NR_OPEN)
        goto out;

    nfds = files->max_fdset;
    write_unlock(&files->file_lock);

    /* Expand to the max in easy steps */
    do {
        if (nfds < (PAGE_SIZE * 8))
            nfds = PAGE_SIZE * 8;
        else {
            nfds = nfds * 2;
            if (nfds > NR_OPEN)
                nfds = NR_OPEN;
        }
    } while (nfds <= nr);

    error = -ENOMEM;
    new_openset = alloc_fdset(nfds);
    new_execset = alloc_fdset(nfds);
    write_lock(&files->file_lock);
    if (!new_openset || !new_execset)
        goto out;

    error = 0;

    /* Copy the existing tables and install the new pointers */
    if (nfds > files->max_fdset) {
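        /* i: longs already populated in the old bitmap;
           count: bytes of new bitmap space to zero */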
        int i = files->max_fdset / (sizeof(unsigned long) * 8);
        int count = (nfds - files->max_fdset) / 8;

        /*
         * Don't copy the entire array if the current fdset is
         * not yet initialised.
         */
        if (i) {
            memcpy (new_openset, files->open_fds, files->max_fdset/8);
            memcpy (new_execset, files->close_on_exec, files->max_fdset/8);
            memset (&new_openset->fds_bits[i], 0, count);
            memset (&new_execset->fds_bits[i], 0, count);
        }

        nfds = xchg(&files->max_fdset, nfds);
        new_openset = xchg(&files->open_fds, new_openset);
        new_execset = xchg(&files->close_on_exec, new_execset);
        write_unlock(&files->file_lock);
        free_fdset (new_openset, nfds);
        free_fdset (new_execset, nfds);
        write_lock(&files->file_lock);
        return 0;
    }
    /* Somebody expanded the array while we slept ... */

out:
    write_unlock(&files->file_lock);
    if (new_openset)
        free_fdset(new_openset, nfds);
    if (new_execset)
        free_fdset(new_execset, nfds);
    write_lock(&files->file_lock);
    return error;
}
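Both expand_fdset() here and expand_fd_array() in Example #1 share the same shape: drop files->file_lock, allocate, retake the lock, then either install the new table or free it because someone else grew the array in the meantime. A typical caller of that era looks roughly like the sketch below (a hedged reconstruction; expand_files is the conventional name and is not part of this listing):

/* Called with files->file_lock held for write.
 * Returns < 0 on error, > 0 if either table had to grow, 0 otherwise. */
int expand_files(struct files_struct *files, int nr)
{
    int err, expand = 0;

    if (nr >= files->max_fdset) {
        expand = 1;
        err = expand_fdset(files, nr);
        if (err)
            goto out;
    }
    if (nr >= files->max_fds) {
        expand = 1;
        err = expand_fd_array(files, nr);
        if (err)
            goto out;
    }
    err = expand;
out:
    return err;
}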