Example #1
int test(char *URL)
{
  CURLcode res;
  CURL *curl;

  if(!strcmp(URL, "check")) {
    /* used by the test script to ask if we can run this test or not */
    if(rlimit(FALSE)) {
      fprintf(stdout, "rlimit problem: %s\n", msgbuff);
      return 1;
    }
    return 0; /* sure, run this! */
  }

  if(rlimit(TRUE)) {
    /* failure */
    return TEST_ERR_MAJOR_BAD;
  }

  /* run the test with the bunch of open file descriptors
     and close them all once the test is over */

  if(curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) {
    fprintf(stderr, "curl_global_init() failed\n");
    close_file_descriptors();
    return TEST_ERR_MAJOR_BAD;
  }

  curl = curl_easy_init();
  if(!curl) {
    fprintf(stderr, "curl_easy_init() failed\n");
    close_file_descriptors();
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  test_setopt(curl, CURLOPT_URL, URL);
  test_setopt(curl, CURLOPT_HEADER, 1L);

  res = curl_easy_perform(curl);

test_cleanup:

  close_file_descriptors();
  curl_easy_cleanup(curl);
  curl_global_cleanup();

  return (int)res;
}
Example #2
/**
 * inode_newsize_ok - may this inode be truncated to a given size
 * @inode:	the inode to be truncated
 * @offset:	the new size to assign to the inode
 * @Returns:	0 on success, -ve errno on failure
 *
 * inode_newsize_ok must be called with i_mutex held.
 *
 * inode_newsize_ok will check filesystem limits and ulimits to check that the
 * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
 * when necessary. Caller must not proceed with inode size change if failure is
 * returned. @inode must be a file (not directory), with appropriate
 * permissions to allow truncate (inode_newsize_ok does NOT check these
 * conditions).
 */
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
	if (inode->i_size < offset) {
		unsigned long limit;

		limit = rlimit(RLIMIT_FSIZE);
		if (limit != RLIM_INFINITY && offset > limit)
			goto out_sig;
		if (offset > inode->i_sb->s_maxbytes)
			goto out_big;
	} else {
		/*
		 * truncation of in-use swapfiles is disallowed - it would
		 * cause subsequent swapout to scribble on the now-freed
		 * blocks.
		 */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;
	}

	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
}
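
Note: the kernel-doc above fully specifies the calling convention. As a hedged
illustration, a filesystem-side caller could be shaped like the sketch below;
my_setsize() and my_do_truncate() are hypothetical names, not kernel APIs, and
the sketch assumes the i_mutex era this code comes from.

/* Hypothetical caller sketch: honor the inode_newsize_ok() contract by
 * taking i_mutex first and aborting the size change on failure.
 * my_do_truncate() stands in for the filesystem-specific truncate path. */
static int my_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	mutex_lock(&inode->i_mutex);	/* contract: i_mutex must be held */
	error = inode_newsize_ok(inode, newsize);
	if (!error)
		error = my_do_truncate(inode, newsize);
	mutex_unlock(&inode->i_mutex);
	return error;			/* 0 on success, -ve errno on failure */
}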
Example #3
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
	if (inode->i_size < offset) {
		unsigned long limit;

		limit = rlimit(RLIMIT_FSIZE);
		if (limit != RLIM_INFINITY && offset > limit)
			goto out_sig;
		if (offset > inode->i_sb->s_maxbytes)
			goto out_big;
	} else {
		/*
		 * truncation of in-use swapfiles is disallowed - it would
		 * cause subsequent swapout to scribble on the now-freed
		 * blocks.
		 */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;
	}

	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
}
Example #4
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
Example #5
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
Example #6
void
memobj_init(void)
{
	unsigned long min_bytes, max_bytes;
	unsigned long mem_bytes = GetPhysicalPages() * 0.90 * TILT_PAGESIZE;
	unsigned long cache_bytes = GetBcacheSize() * 2;

	min_bytes = 2048 * 1024;
	min_bytes = Max(min_bytes, cache_bytes);

	max_bytes = (unsigned long)INT_MAX;
	max_bytes = Min(max_bytes, mem_bytes);
	max_bytes = Min(max_bytes, rlimit(RLIMIT_DATA));
	max_bytes = Min(max_bytes, rlimit(RLIMIT_AS));
#ifdef RLIMIT_VMEM
	max_bytes = Min(max_bytes, rlimit(RLIMIT_VMEM));
#endif

	init_int(&MinHeapByte, min_bytes);
	init_int(&MaxHeapByte, 0.40 * max_bytes);
	assert(MinHeapByte <= MaxHeapByte);

#ifdef sparc
	assert(TILT_PAGESIZE == sysconf(_SC_PAGESIZE));
#else
	assert(TILT_PAGESIZE == sysconf(_SC_PAGE_SIZE));
#endif
	StackInitialize();
	HeapInitialize();
	GuardStackletSize = TILT_PAGESIZE / kilobyte;
	stackletOffset = (GuardStackletSize + MLStackletSize + CStackletSize) *
		kilobyte;
	primaryStackletOffset = 0;
	replicaStackletOffset = stackletOffset;
	/* So we don't pay mmap for first thread - general case? XXXX */
	{ 
		int i;
		Stacklet_t *temp[5];
		for (i=0; i<5; i++)
			temp[i] = Stacklet_Alloc(NULL);
		for (i=0; i<5; i++)
			Stacklet_Dealloc(temp[i]);
	}
}
Example #7
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
}
Example #8
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
Example #9
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
Example #10
static inline unsigned long mmap_base(void)
{
    unsigned long gap = rlimit(RLIMIT_STACK);

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP)
        gap = MAX_GAP;

    return STACK_TOP - (gap & PAGE_MASK);
}
Example #11
static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
    /*
     * Force standard allocation for 64 bit programs.
     */
    if (!is_compat_task())
        return 1;
#endif
    return sysctl_legacy_va_layout ||
           (current->personality & ADDR_COMPAT_LAYOUT) ||
           rlimit(RLIMIT_STACK) == RLIM_INFINITY;
}
Example #12
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	
	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	
	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	
	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
Example #13
static inline unsigned long mmap_base(struct mm_struct *mm)
{
	unsigned long gap = rlimit(RLIMIT_STACK);
	unsigned long random_factor = 0;

	if (current->flags & PF_RANDOMIZE)
		random_factor = get_random_int() % (1024*1024);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
}
Example #14
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
{
	unsigned long limit = rlimit(RLIMIT_CORE);
	ssize_t written;

	if (*foffset + nr > limit)
		return -EIO;

	written = file->f_op->write(file, addr, nr, &file->f_pos);
	*foffset += written;

	if (written != nr)
		return -EIO;

	return 0;
}
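
Note: the limit consulted here is the ordinary per-process core size limit. A
minimal userland sketch (plain POSIX getrlimit(2), nothing spufs-specific)
prints the value that kernel-side checks like the one above enforce.

#include <stdio.h>
#include <sys/resource.h>

/* Print the RLIMIT_CORE soft limit that spufs_dump_write() compares the
 * core-file offset against. */
int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_CORE, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("core soft limit: unlimited\n");
	else
		printf("core soft limit: %llu bytes\n",
		       (unsigned long long)rl.rlim_cur);
	return 0;
}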
Example #15
/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @ofa_pd:	OFA PD contained in siw PD.
 * @start:	starting address of MR (virtual address)
 * @len:	len of MR
 * @rnic_va:	not used by siw
 * @rights:	MR access rights
 * @udata:	user buffer to communicate STag and Key.
 */
struct ib_mr *siw_reg_user_mr(struct ib_pd *ofa_pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr		*mr = NULL;
	struct siw_pd		*pd = siw_pd_ofa2siw(ofa_pd);
	struct siw_umem		*umem = NULL;
	struct siw_ureq_reg_mr	ureq;
	struct siw_uresp_reg_mr	uresp;
	struct siw_dev		*sdev = pd->hdr.sdev;

	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	dprint(DBG_MM|DBG_OBJ, " start: 0x%016llx, "
		"va: 0x%016llx, len: %llu, ctx: %p\n",
		(unsigned long long)start,
		(unsigned long long)rnic_va,
		(unsigned long long)len,
		ofa_pd->uobject->context);
	if (atomic_inc_return(&sdev->num_mem) > SIW_MAX_MR) {
		dprint(DBG_ON, ": Out of MRs: %d\n",
			atomic_read(&sdev->num_mem));
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			dprint(DBG_ON|DBG_MM,
				": pages req: %lu, limit: %lu, locked: %lu\n",
				num_pages, mem_limit, current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
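
Note: the page accounting above turns a byte range into a pinned-page count:
the length plus start's offset within its page, rounded up to whole pages. A
userland re-statement of that arithmetic, assuming a 4096-byte page purely for
illustration:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Same formula as siw_reg_user_mr(): pages spanned by [start, start+len). */
int main(void)
{
	unsigned long start = 0x1010, len = 8192;	/* example values */
	unsigned long num_pages =
		PAGE_ALIGN(len + (start & ~PAGE_MASK)) / PAGE_SIZE;

	printf("pages pinned: %lu\n", num_pages);	/* prints 3 */
	return 0;
}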
Example #16
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
#if !defined(__tilegx__)
	int is_32bit = 1;
#elif defined(CONFIG_COMPAT)
	int is_32bit = is_compat_task();
#else
	int is_32bit = 0;
#endif
	unsigned long random_factor = 0UL;

	/*
	 *  8 bits of randomness in 32bit mmaps, 24 address space bits
	 * 12 bits of randomness in 64bit mmaps, 28 address space bits
	 */
	if (current->flags & PF_RANDOMIZE) {
		if (is_32bit)
			random_factor = get_random_int() % (1<<8);
		else
			random_factor = get_random_int() % (1<<12);

		random_factor <<= PAGE_SHIFT;
	}

	/*
	 * Use standard layout if the expected stack growth is unlimited
	 * or we are running native 64 bits.
	 */
	if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(mm);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
Example #17
void arch_pick_mmap_layout(struct mm_struct *mm)
{
#if !defined(__tilegx__)
	int is_32bit = 1;
#elif defined(CONFIG_COMPAT)
	int is_32bit = is_compat_task();
#else
	int is_32bit = 0;
#endif

	/*
	 * Use standard layout if the expected stack growth is unlimited
	 * or we are running native 64 bits.
	 */
	if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(mm);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
Example #18
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size)
{
	unsigned long gap = rlimit(RLIMIT_STACK);
	unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave an at least ~128 MB hole with possible stack randomization.
	 */
	gap_min = SIZE_128M;
	gap_max = (task_size / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(task_size - gap - rnd);
}
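
Note: the "gap + pad > gap" test is a wrap-around guard; with unsigned
arithmetic the sum is smaller than gap exactly when it overflowed, so the
padding is skipped for values near RLIM_INFINITY. A freestanding sketch of the
same idiom:

#include <limits.h>
#include <stdio.h>

/* Same overflow guard as in mmap_base() above: only apply the pad when
 * the unsigned addition did not wrap. With pad == 0 the branch is also
 * skipped, which is harmless since there is nothing to add. */
static unsigned long add_pad_checked(unsigned long gap, unsigned long pad)
{
	if (gap + pad > gap)
		gap += pad;
	return gap;
}

int main(void)
{
	printf("%lu\n", add_pad_checked(100UL, 28UL));     /* 128 */
	printf("%lu\n", add_pad_checked(ULONG_MAX, 28UL)); /* unchanged */
	return 0;
}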
Example #19
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
Example #20
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later, after the
	 * test of oldbrk with newbrk, then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned. -Ram Gupta
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
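
Note: the rlim check above is what a userland program runs into when it grows
its data segment past RLIMIT_DATA. A rough demonstration follows; the exact
failure point depends on the current heap layout and libc, so treat it as a
sketch rather than a precise test.

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

/* Shrink RLIMIT_DATA, then ask the brk syscall (via sbrk) to grow the
 * data segment well past it; the kernel-side rlimit(RLIMIT_DATA) check
 * in sys_brk is what rejects the request. */
int main(void)
{
	struct rlimit rl = { 1 << 20, 1 << 20 };	/* 1 MiB data limit */

	if (setrlimit(RLIMIT_DATA, &rl) != 0) {
		perror("setrlimit");
		return 1;
	}
	if (sbrk(8 << 20) == (void *)-1)	/* try to grow by 8 MiB */
		perror("sbrk");			/* expect ENOMEM */
	else
		printf("sbrk succeeded (limit not reached yet)\n");
	return 0;
}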
Example #21
int siw_query_device(struct ib_device *ofa_dev, struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);
	/*
	 * A process context is needed to report avail memory resources.
	 */
	if (in_interrupt())
		return -EINVAL;

	memset(attr, 0, sizeof *attr);

	attr->max_mr_size = rlimit(RLIMIT_MEMLOCK); /* per process */
	attr->vendor_id = sdev->attrs.vendor_id;
	attr->vendor_part_id = sdev->attrs.vendor_part_id;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;

	/*
	 * RDMA Read parameters:
	 * Max. ORD (Outbound Read queue Depth), a.k.a. max_initiator_depth
	 * Max. IRD (Inbound Read queue Depth), a.k.a. max_responder_resources
	 */
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->device_cap_flags = sdev->attrs.cap_flags;
	attr->max_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_fmr = sdev->attrs.max_fmr;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;

	attr->max_ah = 0;
	attr->max_mcast_grp = 0;
	attr->max_mcast_qp_attach = 0;
	attr->max_total_mcast_qp_attach = 0;
	attr->max_ee = 0;
	attr->max_rdd = 0;
	attr->max_ee_rd_atom = 0;
	attr->max_ee_init_rd_atom = 0;
	attr->atomic_cap = IB_ATOMIC_NONE;

	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);

	/*
	 * TODO: understand which of the following should
	 * report useful information
	 *
	 * attr->fw_ver;
	 * attr->max_map_per_fmr
	 * attr->max_raw_ipv6_qp
	 * attr->max_raw_ethy_qp
	 * attr->max_pkeys
	 * attr->page_size_cap;
	 * attr->hw_ver;
	 * attr->local_ca_ack_delay;
	 */

	udata->outlen = 0;
	return 0;
}
Example #22
static int rlimit(int keep_open)
{
  int *tmpfd;
  int nitems, i;
  int *memchunk = NULL;
  char *fmt;
  struct rlimit rl;
  char strbuff[256];
  char strbuff1[81];
  char fmt_u[] = "%u";
  char fmt_lu[] = "%lu";
#ifdef HAVE_LONGLONG
  char fmt_llu[] = "%llu";

  if (sizeof(rl.rlim_max) > sizeof(long))
    fmt = fmt_llu;
  else
#endif
    fmt = (sizeof(rl.rlim_max) < sizeof(long))?fmt_u:fmt_lu;

  /* get initial open file limits */

  if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
    store_errmsg("getrlimit() failed", ERRNO);
    fprintf(stderr, "%s\n", msgbuff);
    return -1;
  }

  /* show initial open file limits */

#ifdef RLIM_INFINITY
  if (rl.rlim_cur == RLIM_INFINITY)
    strcpy(strbuff, "INFINITY");
  else
#endif
    sprintf(strbuff, fmt, rl.rlim_cur);
  fprintf(stderr, "initial soft limit: %s\n", strbuff);

#ifdef RLIM_INFINITY
  if (rl.rlim_max == RLIM_INFINITY)
    strcpy(strbuff, "INFINITY");
  else
#endif
    sprintf(strbuff, fmt, rl.rlim_max);
  fprintf(stderr, "initial hard limit: %s\n", strbuff);

  /*
   * if soft limit and hard limit are different we ask the
   * system to raise soft limit all the way up to the hard
   * limit. Due to some other system limit the soft limit
   * might not be raised up to the hard limit. So from this
   * point the resulting soft limit is our limit. Trying to
   * open more than soft limit file descriptors will fail.
   */

  if (rl.rlim_cur != rl.rlim_max) {

#ifdef OPEN_MAX
    if ((rl.rlim_cur > 0) &&
        (rl.rlim_cur < OPEN_MAX)) {
      fprintf(stderr, "raising soft limit up to OPEN_MAX\n");
      rl.rlim_cur = OPEN_MAX;
      if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {
        /* on failure don't abort just issue a warning */
        store_errmsg("setrlimit() failed", ERRNO);
        fprintf(stderr, "%s\n", msgbuff);
        msgbuff[0] = '\0';
      }
    }
#endif

    fprintf(stderr, "raising soft limit up to hard limit\n");
    rl.rlim_cur = rl.rlim_max;
    if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {
      /* on failure don't abort just issue a warning */
      store_errmsg("setrlimit() failed", ERRNO);
      fprintf(stderr, "%s\n", msgbuff);
      msgbuff[0] = '\0';
    }

    /* get current open file limits */

    if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
      store_errmsg("getrlimit() failed", ERRNO);
      fprintf(stderr, "%s\n", msgbuff);
      return -3;
    }

    /* show current open file limits */

#ifdef RLIM_INFINITY
    if (rl.rlim_cur == RLIM_INFINITY)
      strcpy(strbuff, "INFINITY");
    else
#endif
      sprintf(strbuff, fmt, rl.rlim_cur);
    fprintf(stderr, "current soft limit: %s\n", strbuff);

#ifdef RLIM_INFINITY
    if (rl.rlim_max == RLIM_INFINITY)
      strcpy(strbuff, "INFINITY");
    else
#endif
      sprintf(strbuff, fmt, rl.rlim_max);
    fprintf(stderr, "current hard limit: %s\n", strbuff);

  } /* (rl.rlim_cur != rl.rlim_max) */

  /*
   * test 537 is all about testing libcurl functionality
   * when the system has nearly exhausted the number of
   * available file descriptors. Test 537 will try to run
   * with a very small number of file descriptors available.
   * This implies that any file descriptor which is open
   * when the test runs will have a number in the high range
   * of whatever the system supports.
   */

  /*
   * reserve a chunk of memory before opening file descriptors to
   * avoid a low memory condition once the file descriptors are
   * open. System conditions that could make the test fail should
   * be addressed in the precheck phase. This chunk of memory shall
   * be always free()ed before exiting the rlimit() function so
   * that it becomes available to the test.
   */

  for (nitems = i = 1; nitems <= i; i *= 2)
    nitems = i;
  if (nitems > 0x7fff)
    nitems = 0x40000;
  do {
    num_open.rlim_max = sizeof(*memchunk) * (size_t)nitems;
    sprintf(strbuff, fmt, num_open.rlim_max);
    fprintf(stderr, "allocating memchunk %s byte array\n", strbuff);
    memchunk = malloc(sizeof(*memchunk) * (size_t)nitems);
    if (!memchunk) {
      fprintf(stderr, "memchunk, malloc() failed\n");
      nitems /= 2;
    }
  } while (nitems && !memchunk);
  if (!memchunk) {
    store_errmsg("memchunk, malloc() failed", ERRNO);
    fprintf(stderr, "%s\n", msgbuff);
    return -4;
  }

  /* initialize it to fight lazy allocation */

  fprintf(stderr, "initializing memchunk array\n");

  for (i = 0; i < nitems; i++)
    memchunk[i] = -1;

  /* set the number of file descriptors we will try to open */

#ifdef RLIM_INFINITY
  if ((rl.rlim_cur > 0) && (rl.rlim_cur != RLIM_INFINITY)) {
#else
  if (rl.rlim_cur > 0) {
#endif
    /* soft limit minus SAFETY_MARGIN */
    num_open.rlim_max = rl.rlim_cur - SAFETY_MARGIN;
  }
  else {
    /* a huge number of file descriptors */
    for (nitems = i = 1; nitems <= i; i *= 2)
      nitems = i;
    if (nitems > 0x7fff)
      nitems = 0x40000;
    num_open.rlim_max = nitems;
  }

  /* verify that we won't overflow size_t in malloc() */

  if ((size_t)(num_open.rlim_max) > ((size_t)-1) / sizeof(*fd)) {
    sprintf(strbuff1, fmt, num_open.rlim_max);
    sprintf(strbuff, "unable to allocate an array for %s "
            "file descriptors, would overflow size_t", strbuff1);
    store_errmsg(strbuff, 0);
    fprintf(stderr, "%s\n", msgbuff);
    free(memchunk);
    return -5;
  }

  /* allocate array for file descriptors */

  do {
    sprintf(strbuff, fmt, num_open.rlim_max);
    fprintf(stderr, "allocating array for %s file descriptors\n", strbuff);
    fd = malloc(sizeof(*fd) * (size_t)(num_open.rlim_max));
    if (!fd) {
      fprintf(stderr, "fd, malloc() failed\n");
      num_open.rlim_max /= 2;
    }
  } while (num_open.rlim_max && !fd);
  if (!fd) {
    store_errmsg("fd, malloc() failed", ERRNO);
    fprintf(stderr, "%s\n", msgbuff);
    free(memchunk);
    return -6;
  }

  /* initialize it to fight lazy allocation */

  fprintf(stderr, "initializing fd array\n");

  for (num_open.rlim_cur = 0;
       num_open.rlim_cur < num_open.rlim_max;
       num_open.rlim_cur++)
    fd[num_open.rlim_cur] = -1;

  sprintf(strbuff, fmt, num_open.rlim_max);
  fprintf(stderr, "trying to open %s file descriptors\n", strbuff);

  /* open a dummy descriptor */

  fd[0] = open(DEV_NULL, O_RDONLY);
  if (fd[0] < 0) {
    sprintf(strbuff, "opening of %s failed", DEV_NULL);
    store_errmsg(strbuff, ERRNO);
    fprintf(stderr, "%s\n", msgbuff);
    free(fd);
    fd = NULL;
    free(memchunk);
    return -7;
  }

  /* create a bunch of file descriptors */

  for (num_open.rlim_cur = 1;
       num_open.rlim_cur < num_open.rlim_max;
       num_open.rlim_cur++) {

    fd[num_open.rlim_cur] = dup(fd[0]);

    if (fd[num_open.rlim_cur] < 0) {

      fd[num_open.rlim_cur] = -1;

      sprintf(strbuff1, fmt, num_open.rlim_cur);
      sprintf(strbuff, "dup() attempt %s failed", strbuff1);
      fprintf(stderr, "%s\n", strbuff);

      sprintf(strbuff1, fmt, num_open.rlim_cur);
      sprintf(strbuff, "fds system limit seems close to %s", strbuff1);
      fprintf(stderr, "%s\n", strbuff);

      num_open.rlim_max = num_open.rlim_cur - SAFETY_MARGIN;

      num_open.rlim_cur -= num_open.rlim_max;
      sprintf(strbuff1, fmt, num_open.rlim_cur);
      sprintf(strbuff, "closing %s file descriptors", strbuff1);
      fprintf(stderr, "%s\n", strbuff);

      for (num_open.rlim_cur = num_open.rlim_max;
           fd[num_open.rlim_cur] >= 0;
           num_open.rlim_cur++) {
        close(fd[num_open.rlim_cur]);
        fd[num_open.rlim_cur] = -1;
      }

      sprintf(strbuff, fmt, num_open.rlim_max);
      fprintf(stderr, "shrinking array for %s file descriptors\n", strbuff);

      /* we don't care if we can't shrink it */

      tmpfd = realloc(fd, sizeof(*fd) * (size_t)(num_open.rlim_max));
      if (tmpfd) {
        fd = tmpfd;
        tmpfd = NULL;
      }

      break;

    }

  }

  sprintf(strbuff, fmt, num_open.rlim_max);
  fprintf(stderr, "%s file descriptors open\n", strbuff);

#if !defined(HAVE_POLL_FINE)    && \
    !defined(USE_WINSOCK)       && \
    !defined(TPF)

  /*
   * when using select() instead of poll() we cannot test
   * libcurl functionality with a socket number equal or
   * greater than FD_SETSIZE. In any case, macro VERIFY_SOCK
   * in lib/select.c enforces this check and protects libcurl
   * from a possible crash. The effect of this protection
   * is that test 537 will always fail, since the actual
   * call to select() never takes place. We skip test 537
   * with an indication that select limit would be exceeded.
   */

  num_open.rlim_cur = FD_SETSIZE - SAFETY_MARGIN;
  if (num_open.rlim_max > num_open.rlim_cur) {
    sprintf(strbuff, "select limit is FD_SETSIZE %d", FD_SETSIZE);
    store_errmsg(strbuff, 0);
    fprintf(stderr, "%s\n", msgbuff);
    close_file_descriptors();
    free(memchunk);
    return -8;
  }

  num_open.rlim_cur = FD_SETSIZE - SAFETY_MARGIN;
  for (rl.rlim_cur = 0;
       rl.rlim_cur < num_open.rlim_max;
       rl.rlim_cur++) {
    if ((fd[rl.rlim_cur] > 0) &&
       ((unsigned int)fd[rl.rlim_cur] > num_open.rlim_cur)) {
      sprintf(strbuff, "select limit is FD_SETSIZE %d", FD_SETSIZE);
      store_errmsg(strbuff, 0);
      fprintf(stderr, "%s\n", msgbuff);
      close_file_descriptors();
      free(memchunk);
      return -9;
    }
  }

#endif /* using a FD_SETSIZE bound select() */

  /*
   * Old or 'backwards compatible' implementations of stdio do not allow
   * handling of streams with an underlying file descriptor number greater
   * than 255, even when allowing high numbered file descriptors for sockets.
   * At this point we have a big number of file descriptors which have been
   * opened using dup(), so lets test the stdio implementation and discover
   * if it is capable of fopen()ing some additional files.
   */

  if (!fopen_works()) {
    sprintf(strbuff1, fmt, num_open.rlim_max);
    sprintf(strbuff, "stdio fopen() fails with %s fds open()",
            strbuff1);
    fprintf(stderr, "%s\n", strbuff);
    sprintf(strbuff, "stdio fopen() fails with lots of fds open()");
    store_errmsg(strbuff, 0);
    close_file_descriptors();
    free(memchunk);
    return -10;
  }

  /* free the chunk of memory we were reserving so that it
     becomes available to the test */

  free(memchunk);

  /* close file descriptors unless instructed to keep them */

  if (!keep_open) {
    close_file_descriptors();
  }

  return 0;
}

int test(char *URL)
{
  CURLcode res;
  CURL *curl;

  if(!strcmp(URL, "check")) {
    /* used by the test script to ask if we can run this test or not */
    if(rlimit(FALSE)) {
      fprintf(stdout, "rlimit problem: %s\n", msgbuff);
      return 1;
    }
    return 0; /* sure, run this! */
  }

  if (rlimit(TRUE)) {
    /* failure */
    return TEST_ERR_MAJOR_BAD;
  }

  /* run the test with the bunch of open file descriptors
     and close them all once the test is over */

  if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) {
    fprintf(stderr, "curl_global_init() failed\n");
    close_file_descriptors();
    return TEST_ERR_MAJOR_BAD;
  }

  if ((curl = curl_easy_init()) == NULL) {
    fprintf(stderr, "curl_easy_init() failed\n");
    close_file_descriptors();
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  test_setopt(curl, CURLOPT_URL, URL);
  test_setopt(curl, CURLOPT_HEADER, 1L);

  res = curl_easy_perform(curl);

test_cleanup:

  close_file_descriptors();
  curl_easy_cleanup(curl);
  curl_global_cleanup();

  return (int)res;
}
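
Note: the long comments in Example #22 describe the usual RLIMIT_NOFILE dance:
raise the soft limit toward the hard limit, accept whatever the system grants,
and treat the re-read soft limit as the real ceiling. Stripped of the test
harness, the core idiom is roughly this sketch:

#include <stdio.h>
#include <sys/resource.h>

/* Best-effort raise of the open-file soft limit to the hard limit, as
 * done at the top of rlimit() above. The kernel may grant less than
 * requested, so re-read and report the resulting soft limit. */
static int raise_nofile_limit(struct rlimit *rl)
{
	if (getrlimit(RLIMIT_NOFILE, rl) != 0)
		return -1;
	if (rl->rlim_cur != rl->rlim_max) {
		rl->rlim_cur = rl->rlim_max;
		(void)setrlimit(RLIMIT_NOFILE, rl);	/* best effort */
		if (getrlimit(RLIMIT_NOFILE, rl) != 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct rlimit rl;

	if (raise_nofile_limit(&rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("open-file soft limit: unlimited\n");
	else
		printf("open-file soft limit: %llu\n",
		       (unsigned long long)rl.rlim_cur);
	return 0;
}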