Code example #1
File: io_count.c  Project: GSathish/casamia-datastore
/* Wrapper for pread(): record byte and call counts for non-terminal
   descriptors, then forward to glibc's real implementation.  */
ssize_t pread(int fd, void * buf, size_t count, off_t offset)
{
	do_setup();
	if(!isatty(fd))
	{
		io_stats.pread_total += count;
		io_stats.pread_calls++;
	}
	return __libc_pread(fd, buf, count, offset);
}
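
This wrapper interposes on pread to accumulate I/O statistics before delegating to glibc's real entry point. It depends on pieces defined elsewhere in io_count.c (do_setup, io_stats, and a declaration of __libc_pread). A minimal sketch of supporting declarations that could sit above the wrapper is shown below; the struct layout and the empty do_setup body are assumptions for illustration, not taken from the project, and the extern declaration assumes the toolchain exposes glibc's __libc_pread symbol.

#include <sys/types.h>
#include <unistd.h>

/* Real libc entry point that the wrapper forwards to (assumes the
   toolchain exposes glibc's __libc_pread symbol).  */
extern ssize_t __libc_pread(int fd, void *buf, size_t count, off_t offset);

/* Hypothetical counter block; the real io_count.c may track more calls.  */
static struct {
	size_t pread_total;	/* bytes requested via pread */
	size_t pread_calls;	/* number of pread invocations */
} io_stats;

/* Hypothetical one-time setup hook; the real do_setup is defined
   elsewhere in the project.  */
static void do_setup(void)
{
}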
Code example #2
File: aio_misc.c  Project: KubaKaszycki/kklibc
static void *
handle_fildes_io (void *arg)
{
  pthread_t self = pthread_self ();
  struct sched_param param;
  struct requestlist *runp = (struct requestlist *) arg;
  aiocb_union *aiocbp;
  int policy;
  int fildes;

  pthread_getschedparam (self, &policy, &param);

  do
    {
      /* If runp is NULL, then we were created to service the work queue
	 in general, not to handle any particular request. In that case we
	 skip the "do work" stuff on the first pass, and go directly to the
	 "get work off the work queue" part of this loop, which is near the
	 end. */
      if (runp == NULL)
	pthread_mutex_lock (&__aio_requests_mutex);
      else
	{
	  /* Hopefully this request is marked as running.  */
	  assert (runp->running == allocated);

	  /* Update our variables.  */
	  aiocbp = runp->aiocbp;
	  fildes = aiocbp->aiocb.aio_fildes;

	  /* Change the priority to the requested value (if necessary).  */
	  if (aiocbp->aiocb.__abs_prio != param.sched_priority
	      || aiocbp->aiocb.__policy != policy)
	    {
	      param.sched_priority = aiocbp->aiocb.__abs_prio;
	      policy = aiocbp->aiocb.__policy;
	      pthread_setschedparam (self, policy, &param);
	    }

	  /* Process request pointed to by RUNP.  We must not be disturbed
	     by signals.  */
	  if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_READ)
	    {
	      if (sizeof (off_t) != sizeof (off64_t)
		  && aiocbp->aiocb.aio_lio_opcode & 128)
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__pread64 (fildes, (void *)
						 aiocbp->aiocb64.aio_buf,
						 aiocbp->aiocb64.aio_nbytes,
						 aiocbp->aiocb64.aio_offset));
	      else
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__libc_pread (fildes,
						    (void *)
						    aiocbp->aiocb.aio_buf,
						    aiocbp->aiocb.aio_nbytes,
						    aiocbp->aiocb.aio_offset));

	      if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
		/* The Linux kernel is different from others.  It returns
		   ESPIPE if using pread on a socket.  Other platforms
		   simply ignore the offset parameter and behave like
		   read.  */
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (read (fildes,
					    (void *) aiocbp->aiocb64.aio_buf,
					    aiocbp->aiocb64.aio_nbytes));
	    }
	  else if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_WRITE)
	    {
	      if (sizeof (off_t) != sizeof (off64_t)
		  && aiocbp->aiocb.aio_lio_opcode & 128)
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__pwrite64 (fildes, (const void *)
						  aiocbp->aiocb64.aio_buf,
						  aiocbp->aiocb64.aio_nbytes,
						  aiocbp->aiocb64.aio_offset));
	      else
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__libc_pwrite (fildes, (const void *)
					      aiocbp->aiocb.aio_buf,
					      aiocbp->aiocb.aio_nbytes,
					      aiocbp->aiocb.aio_offset));

	      if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
		/* The Linux kernel is different from others.  It returns
		   ESPIPE if using pwrite on a socket.  Other platforms
		   simply ignore the offset parameter and behave like
		   write.  */
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (write (fildes,
					     (void *) aiocbp->aiocb64.aio_buf,
					     aiocbp->aiocb64.aio_nbytes));
	    }
	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_DSYNC)
	    aiocbp->aiocb.__return_value =
	      TEMP_FAILURE_RETRY (fdatasync (fildes));
	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_SYNC)
	    aiocbp->aiocb.__return_value =
	      TEMP_FAILURE_RETRY (fsync (fildes));
	  else
	    {
	      /* This is an invalid opcode.  */
	      aiocbp->aiocb.__return_value = -1;
	      __set_errno (EINVAL);
	    }

	  /* Get the mutex.  */
	  pthread_mutex_lock (&__aio_requests_mutex);

	  if (aiocbp->aiocb.__return_value == -1)
	    aiocbp->aiocb.__error_code = errno;
	  else
	    aiocbp->aiocb.__error_code = 0;

	  /* Send the signal to notify about finished processing of the
	     request.  */
	  __aio_notify (runp);

	  /* For debugging purposes we reset the running flag of the
	     finished request.  */
	  assert (runp->running == allocated);
	  runp->running = done;

	  /* Now dequeue the current request.  */
	  __aio_remove_request (NULL, runp, 0);
	  if (runp->next_prio != NULL)
	    add_request_to_runlist (runp->next_prio);

	  /* Free the old element.  */
	  __aio_free_request (runp);
	}

      runp = runlist;

      /* If the runlist is empty, then we sleep for a while, waiting for
	 something to arrive in it. */
      if (runp == NULL && optim.aio_idle_time >= 0)
	{
	  struct timeval now;
	  struct timespec wakeup_time;

	  ++idle_thread_count;
	  __gettimeofday (&now, NULL);
	  wakeup_time.tv_sec = now.tv_sec + optim.aio_idle_time;
	  wakeup_time.tv_nsec = now.tv_usec * 1000;
	  if (wakeup_time.tv_nsec >= 1000000000)
	    {
	      wakeup_time.tv_nsec -= 1000000000;
	      ++wakeup_time.tv_sec;
	    }
	  pthread_cond_timedwait (&__aio_new_request_notification,
				  &__aio_requests_mutex,
				  &wakeup_time);
	  --idle_thread_count;
	  runp = runlist;
	}

      if (runp == NULL)
	--nthreads;
      else
	{
	  assert (runp->running == yes);
	  runp->running = allocated;
	  runlist = runp->next_run;

	  /* If we have a request to process, and there's still another in
	     the run list, then we need to either wake up or create a new
	     thread to service the request that is still in the run list. */
	  if (runlist != NULL)
	    {
	      /* There are at least two items in the work queue to work on.
		 If there are other idle threads, then we should wake them
		 up for these other work elements; otherwise, we should try
		 to create a new thread. */
	      if (idle_thread_count > 0)
		pthread_cond_signal (&__aio_new_request_notification);
	      else if (nthreads < optim.aio_threads)
		{
		  pthread_t thid;
		  pthread_attr_t attr;

		  /* Make sure the thread is created detached.  */
		  pthread_attr_init (&attr);
		  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);

		  /* Now try to start a thread. If we fail, no big deal,
		     because we know that there is at least one thread (us)
		     that is working on AIO operations. */
		  if (pthread_create (&thid, &attr, handle_fildes_io, NULL)
		      == 0)
		    ++nthreads;
		}
	    }
	}

      /* Release the mutex.  */
      pthread_mutex_unlock (&__aio_requests_mutex);
    }
  while (runp != NULL);

  return NULL;
}
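
handle_fildes_io is the body of the worker threads that drain glibc's internal AIO run list: each iteration performs one queued pread/pwrite/fsync, notifies the submitter, and then either picks up the next request or idles until one arrives. Requests reach it through the public POSIX AIO interface. The self-contained sketch below shows the typical submit/poll/collect sequence whose read path ends up in the LIO_READ branch above; the file name, buffer size, and polling interval are arbitrary choices for illustration (link with -lrt on glibc).

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  static char buf[4096];
  int fd = open ("data.bin", O_RDONLY);	/* placeholder file name */
  if (fd == -1)
    {
      perror ("open");
      return 1;
    }

  struct aiocb cb;
  memset (&cb, 0, sizeof cb);
  cb.aio_fildes = fd;
  cb.aio_buf = buf;
  cb.aio_nbytes = sizeof buf;
  cb.aio_offset = 0;

  /* Enqueue the request; a worker thread (handle_fildes_io in the
     implementation above) performs the actual pread.  */
  if (aio_read (&cb) == -1)
    {
      perror ("aio_read");
      return 1;
    }

  /* Poll until the request leaves the EINPROGRESS state.  */
  while (aio_error (&cb) == EINPROGRESS)
    usleep (1000);

  ssize_t nread = aio_return (&cb);
  printf ("read %zd bytes\n", nread);

  close (fd);
  return 0;
}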
Code example #3
File: original_nonpth_syscalls.c  Project: VanL/zrt
/* Direct passthrough to libc's pread: read count bytes at offset
   without changing the file position.  */
ssize_t syscall_pread(int fd, void *buf, size_t count, off_t offset){
    return __libc_pread(fd, buf, count, offset);
}
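
Usage is the same as for pread itself: read count bytes starting at offset without moving the descriptor's file position. A purely illustrative call, assuming an already open descriptor fd:

    char buf[512];
    /* Read up to 512 bytes starting at byte offset 4096; the file
       position of fd is left untouched.  */
    ssize_t n = syscall_pread(fd, buf, sizeof buf, 4096);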