/*
** Initialize the thread private data manipulation
*/
void _PR_InitTPD(void)
{
    _pr_tpd_destructors = (PRThreadPrivateDTOR*)
        PR_CALLOC(_PR_TPD_LIMIT * sizeof(PRThreadPrivateDTOR*));
    PR_ASSERT(NULL != _pr_tpd_destructors);
    _pr_tpd_length = _PR_TPD_LIMIT;
}
Example #2
nsresult nsIMAPNamespaceList::InitFromString(const char *nameSpaceString, EIMAPNamespaceType nstype)
{
    nsresult rv = NS_OK;
    if (nameSpaceString)
    {
        int numNamespaces = UnserializeNamespaces(nameSpaceString, nullptr, 0);
        char **prefixes = (char**) PR_CALLOC(numNamespaces * sizeof(char*));
        if (prefixes)
        {
            int len = UnserializeNamespaces(nameSpaceString, prefixes, numNamespaces);
            for (int i = 0; i < len; i++)
            {
                char *thisns = prefixes[i];
                char delimiter = '/';	// a guess
                if (PL_strlen(thisns) >= 1)
                    delimiter = thisns[PL_strlen(thisns)-1];
                nsIMAPNamespace *ns = new nsIMAPNamespace(nstype, thisns, delimiter, true);
                if (ns)
                    AddNewNamespace(ns);
                PR_FREEIF(thisns);
            }
            PR_Free(prefixes);
        }
    }

    return rv;
}
NS_IMETHODIMP nsIMAPHostSessionList::SetNamespaceFromPrefForHost(const char *serverKey,
                                                                 const char *namespacePref, EIMAPNamespaceType nstype)
{
  PR_EnterMonitor(gCachedHostInfoMonitor);
  nsIMAPHostInfo *host = FindHost(serverKey);
  if (host)
  {
    if (namespacePref)
    {
      int numNamespaces = host->fNamespaceList->UnserializeNamespaces(namespacePref, nsnull, 0);
      char **prefixes = (char**) PR_CALLOC(numNamespaces * sizeof(char*));
      if (prefixes)
      {
        int len = host->fNamespaceList->UnserializeNamespaces(namespacePref, prefixes, numNamespaces);
        for (int i = 0; i < len; i++)
        {
          char *thisns = prefixes[i];
          char delimiter = '/';	// a guess
          if (PL_strlen(thisns) >= 1)
            delimiter = thisns[PL_strlen(thisns)-1];
          nsIMAPNamespace *ns = new nsIMAPNamespace(nstype, thisns, delimiter, true);
          if (ns)
            host->fNamespaceList->AddNewNamespace(ns);
          PR_FREEIF(thisns);
        }
        PR_Free(prefixes);
      }
    }
  }
  PR_ExitMonitor(gCachedHostInfoMonitor);
  return (host == NULL) ? NS_ERROR_ILLEGAL_VALUE : NS_OK;
}
Example #4
static StrItem* newStrItem(const char *s, StrItem *next)
{
    StrItem *p = (StrItem*)PR_CALLOC(sizeof(StrItem));
    if (!p)                     /* PR_CALLOC can fail; don't dereference NULL */
        return NULL;
    p->next = next;
    p->s = s;
    p->refCnt = 1;
    return p;
}
Example #5
void _pr_vertexbuffer_singular_init(pr_vertexbuffer* vertexBuffer, PRsizei numVertices)
{
    if (vertexBuffer != NULL)
    {
        vertexBuffer->numVertices   = numVertices;
        vertexBuffer->vertices      = PR_CALLOC(pr_vertex, numVertices);
    }
}
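A caution on names: this example and the next come from a software renderer, not NSPR, so their PR_CALLOC is a two-argument type/count macro rather than NSPR's single-argument byte allocator. For contrast, NSPR's definition from prmem.h next to a presumed renderer-style equivalent (the second definition is an assumption inferred from the call sites; the two would never coexist in one translation unit):

/* NSPR, prmem.h: takes a byte count, returns void* */
#define PR_CALLOC(_size) (PR_Calloc(1, (_size)))

/* renderer style (assumed): allocates 'count' zeroed elements of 'type' */
#define PR_CALLOC(type, count) ((type*)calloc((count), sizeof(type)))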
Example #6
static void _vertexbuffer_resize(pr_vertexbuffer* vertexBuffer, PRsizei numVertices)
{
    // Check if vertex buffer must be reallocated
    if (vertexBuffer->vertices == NULL || vertexBuffer->numVertices != numVertices)
    {
        // Create new vertex buffer data
        PR_FREE(vertexBuffer->vertices);

        vertexBuffer->numVertices   = numVertices;
        vertexBuffer->vertices      = PR_CALLOC(pr_vertex, numVertices);
    }
}
Example #7
PR_IMPLEMENT(PRStatus) PR_NewThreadPrivateIndex(
    PRUintn *newIndex, PRThreadPrivateDTOR dtor)
{
    PRStatus rv;

    if (!_pr_initialized) _PR_ImplicitInitialization();


    if (_pr_tpd_highwater >= _PR_TPD_LIMIT)
    {
        PR_SetError(PR_TPD_RANGE_ERROR, 0);
	    rv = PR_FAILURE;  /* that's just wrong */
    }
    else
    {
        PRThreadPrivateDTOR *old = NULL;
		PRIntn _is;

        _PR_LOCK_TPINDEX();
        if (_pr_tpd_highwater >= _pr_tpd_length)
        {
            old = _pr_tpd_destructors;
            _pr_tpd_destructors = PR_CALLOC(
                (_pr_tpd_length + _PR_TPD_MODULO) * sizeof(PRThreadPrivateDTOR*));
            if (NULL == _pr_tpd_destructors)
            {
                _pr_tpd_destructors = old; old = NULL;
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
	            rv = PR_FAILURE;  /* that's just wrong */
	            goto failed;  /* then extract one's self */
	        }
	        else
	        {
                memcpy(
                    _pr_tpd_destructors, old,
                    _pr_tpd_length * sizeof(PRThreadPrivateDTOR*));
                _pr_tpd_length += _PR_TPD_MODULO;
	        }
        }
        
        *newIndex = _pr_tpd_highwater++;  /* this is really all we wanted */
        _pr_tpd_destructors[*newIndex] = dtor;  /* record destructor @index */
        rv = PR_SUCCESS;  /* set only after the slot is actually recorded,
                          ** so a 'goto failed' above keeps PR_FAILURE */

failed:
        _PR_UNLOCK_TPINDEX();
        if (NULL != old) PR_DELETE(old);
    }

    return rv;
}
PR_IMPLEMENT(PRStatus) PR_SetThreadPrivate(PRUintn index, void *priv)
{
    PRThread *self = PR_GetCurrentThread();

    /*
    ** The index being set might not have a sufficient vector in this
    ** thread. But if the index has been allocated, it's okay to go
    ** ahead and extend this one now.
    */
    if ((index >= _PR_TPD_LIMIT) || (index >= _pr_tpd_highwater))
    {
        PR_SetError(PR_TPD_RANGE_ERROR, 0);
        return PR_FAILURE;
    }

    PR_ASSERT(((NULL == self->privateData) && (0 == self->tpdLength))
        || ((NULL != self->privateData) && (0 != self->tpdLength)));

    if ((NULL == self->privateData) || (self->tpdLength <= index))
    {
        void *extension = PR_CALLOC(_pr_tpd_length * sizeof(void*));
        if (NULL == extension)
        {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return PR_FAILURE;
        }
        if (self->privateData) {
            (void)memcpy(
                extension, self->privateData,
                self->tpdLength * sizeof(void*));
            PR_DELETE(self->privateData);
        }
        self->tpdLength = _pr_tpd_length;
        self->privateData = (void**)extension;
    }
    /*
    ** There wasn't much chance of having to call the destructor
    ** unless the slot already existed.
    */
    else if (self->privateData[index] && _pr_tpd_destructors[index])
    {
        void *data = self->privateData[index];
        self->privateData[index] = NULL;
        (*_pr_tpd_destructors[index])(data);
    }

    PR_ASSERT(index < self->tpdLength);
    self->privateData[index] = priv;

    return PR_SUCCESS;
}
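Taken together with PR_GetThreadPrivate, the two routines above form NSPR's thread-private-data API. A minimal usage sketch; the slot name, destructor, and wrapper functions are illustrative:

#include "nspr.h"

static PRUintn conn_index;                /* one slot shared by this module */

static void PR_CALLBACK free_conn(void *priv)
{
    PR_Free(priv);  /* runs when the slot is overwritten or the thread exits */
}

PRStatus conn_module_init(void)
{
    return PR_NewThreadPrivateIndex(&conn_index, free_conn);
}

void conn_remember(void *conn)
{
    (void)PR_SetThreadPrivate(conn_index, conn);  /* value is per thread */
}

void *conn_current(void)
{
    return PR_GetThreadPrivate(conn_index);       /* NULL if never set here */
}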
Example #9
PR_IMPLEMENT(PRWaitGroup*) PR_CreateWaitGroup(PRInt32 size /* ignored */)
{
    PRWaitGroup *wg = NULL;
    if (PR_FAILURE == MW_Init()) goto failed;

    if (NULL == (wg = PR_NEWZAP(PRWaitGroup))) goto failed;
    /* the wait group itself */
    wg->ml = PR_NewLock();
    if (NULL == wg->ml) goto failed_lock;
    wg->io_taken = PR_NewCondVar(wg->ml);
    if (NULL == wg->io_taken) goto failed_cvar0;
    wg->io_complete = PR_NewCondVar(wg->ml);
    if (NULL == wg->io_complete) goto failed_cvar1;
    wg->new_business = PR_NewCondVar(wg->ml);
    if (NULL == wg->new_business) goto failed_cvar2;
    wg->mw_manage = PR_NewCondVar(wg->ml);
    if (NULL == wg->mw_manage) goto failed_cvar3;

    PR_INIT_CLIST(&wg->group_link);
    PR_INIT_CLIST(&wg->io_ready);

    /* the waiters sequence */
    wg->waiter = (_PRWaiterHash*)PR_CALLOC(
        sizeof(_PRWaiterHash) +
        (_PR_DEFAULT_HASH_LENGTH * sizeof(PRRecvWait*)));
    if (NULL == wg->waiter) goto failed_waiter;
    wg->waiter->count = 0;
    wg->waiter->length = _PR_DEFAULT_HASH_LENGTH;

    PR_Lock(mw_lock);
    PR_APPEND_LINK(&wg->group_link, &mw_state->group_list);
    PR_Unlock(mw_lock);
    return wg;

failed_waiter:
    PR_DestroyCondVar(wg->mw_manage);
failed_cvar3:
    PR_DestroyCondVar(wg->new_business);
failed_cvar2:
    PR_DestroyCondVar(wg->io_complete);
failed_cvar1:
    PR_DestroyCondVar(wg->io_taken);
failed_cvar0:
    PR_DestroyLock(wg->ml);
failed_lock:
    PR_DELETE(wg);

failed:
    return wg;
}  /* PR_CreateWaitGroup */
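Note that PR_DELETE is NSPR's free-and-null macro, so the fall-through from failed_lock: to failed: returns NULL rather than a dangling pointer. For context, a hedged sketch of how a caller drives a wait group; PR_AddWaitFileDesc, PR_WaitRecvReady, and PR_DestroyWaitGroup are the companion entry points, while the socket argument, buffer size, and process() consumer are placeholders:

#include <string.h>
#include "nspr.h"

extern void process(void *data, PRInt32 nbytes);  /* placeholder consumer */

void wait_group_example(PRFileDesc *sock)
{
    PRWaitGroup *wg = PR_CreateWaitGroup(0);  /* size argument is ignored */
    PRRecvWait wait;
    char buffer[1024];

    if (NULL == wg) return;
    memset(&wait, 0, sizeof(wait));
    wait.fd = sock;                           /* an already-connected socket */
    wait.timeout = PR_SecondsToInterval(10);
    wait.buffer.start = buffer;
    wait.buffer.length = sizeof(buffer);

    if (PR_SUCCESS == PR_AddWaitFileDesc(wg, &wait))
    {
        PRRecvWait *ready = PR_WaitRecvReady(wg);  /* blocks for a completion */
        if ((NULL != ready) && (PR_MW_SUCCESS == ready->outcome))
            process(ready->buffer.start, ready->bytesRecv);
    }
    (void)PR_DestroyWaitGroup(wg);
}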
Example #10
File: prtpool.c (project: Akesure/jxcore)
static PRThreadPool *
alloc_threadpool(void)
{
	PRThreadPool *tp;

	tp = (PRThreadPool *) PR_CALLOC(sizeof(*tp));
	if (NULL == tp)
		goto failed;
	tp->jobq.lock = PR_NewLock();
	if (NULL == tp->jobq.lock)
		goto failed;
	tp->jobq.cv = PR_NewCondVar(tp->jobq.lock);
	if (NULL == tp->jobq.cv)
		goto failed;
	tp->join_lock = PR_NewLock();
	if (NULL == tp->join_lock)
		goto failed;
#ifdef OPT_WINNT
	tp->jobq.nt_completion_port = CreateIoCompletionPort(INVALID_HANDLE_VALUE,
									NULL, 0, 0);
	if (NULL == tp->jobq.nt_completion_port)
		goto failed;
#endif

	tp->ioq.lock = PR_NewLock();
	if (NULL == tp->ioq.lock)
		goto failed;

	/* Timer queue */

	tp->timerq.lock = PR_NewLock();
	if (NULL == tp->timerq.lock)
		goto failed;
	tp->timerq.cv = PR_NewCondVar(tp->timerq.lock);
	if (NULL == tp->timerq.cv)
		goto failed;

	tp->shutdown_cv = PR_NewCondVar(tp->jobq.lock);
	if (NULL == tp->shutdown_cv)
		goto failed;
	tp->ioq.notify_fd = PR_NewPollableEvent();
	if (NULL == tp->ioq.notify_fd)
		goto failed;
	return tp;
failed:
	delete_threadpool(tp);
	PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
	return NULL;
}
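alloc_threadpool is internal to prtpool.c; callers reach it through PR_CreateThreadPool. A minimal sketch of the surrounding public API from prtpool.h, with illustrative thread counts, stack size, and job body:

#include "prtpool.h"

static void PR_CALLBACK do_work(void *arg)
{
    /* runs on one of the pool's worker threads */
}

void thread_pool_example(void)
{
    /* 4 initial threads, at most 16, default stack size */
    PRThreadPool *pool = PR_CreateThreadPool(4, 16, 0);
    PRJob *job;

    if (NULL == pool) return;
    job = PR_QueueJob(pool, do_work, NULL, PR_TRUE /* joinable */);
    if (NULL != job)
        (void)PR_JoinJob(job);          /* wait for this one job */
    (void)PR_ShutdownThreadPool(pool);  /* stop accepting new jobs */
    (void)PR_JoinThreadPool(pool);      /* wait for the workers to exit */
}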
Example #11
static _PR_HashStory MW_ExpandHashInternal(PRWaitGroup *group)
{
    PRRecvWait **desc;
    PRUint32 pidx, length = 0;
    _PRWaiterHash *newHash, *oldHash = group->waiter;
    

    static const PRInt32 prime_number[] = {
        _PR_DEFAULT_HASH_LENGTH, 179, 521, 907, 1427,
        2711, 3917, 5021, 8219, 11549, 18911, 26711, 33749, 44771};
    PRUintn primes = (sizeof(prime_number) / sizeof(prime_number[0]));

    /* look up the next size we'd like to use for the hash table */
    for (pidx = 0; pidx < primes - 1; ++pidx)  /* - 1: we read pidx + 1 below */
    {
        if (prime_number[pidx] == oldHash->length)
        {
            length = prime_number[pidx + 1];
            break;
        }
    }
    if (0 == length)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return _prmw_error;  /* we're hosed */
    }

    /* allocate the new hash table and fill it in with the old */
    newHash = (_PRWaiterHash*)PR_CALLOC(
        sizeof(_PRWaiterHash) + (length * sizeof(PRRecvWait*)));
    if (NULL == newHash)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return _prmw_error;
    }

    newHash->length = length;
    for (desc = &oldHash->recv_wait; newHash->count < oldHash->count; ++desc)
    {
        if (NULL != *desc)
        {
            if (_prmw_success != MW_AddHashInternal(*desc, newHash))
            {
                PR_ASSERT(!"But, but, but ...");
                PR_DELETE(newHash);
                return _prmw_error;
            }
        }
    }
    PR_DELETE(group->waiter);
    group->waiter = newHash;
    return _prmw_success;
}  /* MW_ExpandHashInternal */
Example #12
char* dupStr(const char *s, unsigned int size)
{
    char *t;
    if (size == 0) {
        size = PL_strlen(s);
    }
    t = (char*)PR_CALLOC(size + 1);
    if (t) {
        memcpy(t, s, size);
        t[size] = 0;
        return t;
    }
    else {
        return (char*)0;
    }
}
Example #13
void initLex(const char *inputstring, unsigned long inputlen) {
  /* initialize lex mode stack */
  lexBuf.lexModeStack[lexBuf.lexModeStackTop = 0] = L_NORMAL;

  /* initialize lex buffer. */
  lexBuf.inputString = (char *)inputstring;
  lexBuf.inputLen = inputlen;
  lexBuf.curPos = 0;

  lexBuf.len = 0;
  lexBuf.getPtr = 0;

  lexBuf.maxToken = PR_MAXTOKEN;
  lexBuf.strs = (char *)PR_CALLOC(PR_MAXTOKEN);
  lexBuf.strsLen = 0;
}
Example #14
nsMsgLineStreamBuffer::nsMsgLineStreamBuffer(PRUint32 aBufferSize, bool aAllocateNewLines, bool aEatCRLFs, char aLineToken) 
           : m_eatCRLFs(aEatCRLFs), m_allocateNewLines(aAllocateNewLines), m_lineToken(aLineToken)
{
  NS_PRECONDITION(aBufferSize > 0, "invalid buffer size!!!");
  m_dataBuffer = nsnull;
  m_startPos = 0;
  m_numBytesInBuffer = 0;

  // used to buffer incoming data by ReadNextLineFromInput
  if (aBufferSize > 0)
  {
    m_dataBuffer = (char *) PR_CALLOC(sizeof(char) * aBufferSize);
  }

  m_dataBufferSize = aBufferSize;
}
NTStatsServer::NTStatsServer(void) :
                Thread("NT Stats Thread"),
                statsMsgHandler_(*this) // Reference to StatsUpdateHandler
{
    maxPollItems_ = 10;         // Arbitrary number
    nPollItems_ = 0;
    pollEvents_ = (HANDLE*) PR_CALLOC(maxPollItems_* sizeof(HANDLE));
    if (pollEvents_ != NULL)
    {
        int nIndex = 0;
        for (nIndex = 0; nIndex < maxPollItems_; ++nIndex)
        {
            pollEvents_[nIndex] = INVALID_HANDLE_VALUE;
        }
    }
    fdArray_ = new StatsFileDesc[maxPollItems_];
}
Example #16
pr_context* _pr_context_create(const PRcontextdesc* desc, PRuint width, PRuint height)
{
    if (desc == NULL || desc->window == NULL || width == 0 || height == 0)  // PRuint is unsigned
    {
        _pr_error_set(PR_ERROR_INVALID_ARGUMENT, __FUNCTION__);
        return NULL;
    }

    // Create render context
    pr_context* context = PR_MALLOC(pr_context);
    if (context == NULL)
        return NULL;    // allocation failed

    // Setup bitmap info structure
    BITMAPINFO* bmi = (&context->bmpInfo);
    memset(bmi, 0, sizeof(BITMAPINFO));

    bmi->bmiHeader.biSize           = sizeof(BITMAPINFOHEADER);
    bmi->bmiHeader.biWidth          = (LONG)width;
    bmi->bmiHeader.biHeight         = (LONG)height;
    bmi->bmiHeader.biPlanes         = 1;
    bmi->bmiHeader.biBitCount       = 24;
    bmi->bmiHeader.biCompression    = BI_RGB;

    // Setup context
    context->wnd        = *((HWND*)desc->window);
    context->dc         = GetDC(context->wnd);
    context->dcBmp      = CreateCompatibleDC(context->dc);
    context->bmp        = CreateCompatibleBitmap(context->dc, width, height);
    context->colors     = PR_CALLOC(pr_color, width*height);
    context->width      = width;
    context->height     = height;

    SelectObject(context->dcBmp, context->bmp);

    // Create color palette
    context->colorPalette = PR_MALLOC(pr_color_palette);
    _pr_color_palette_fill_r3g3b2(context->colorPalette);

    // Initialize state machine
    _pr_state_machine_init(&(context->stateMachine));
    _pr_context_makecurrent(context);

    return context;
}
void StatsBufferReader::initialize(const void* buffer,
                                   int bufLen,
                                   PRBool fCopy)
{
    nCurIndex_ = 0;
    if (fCopy == PR_TRUE)
    {
        if (bufLen > 0)
        {
            buffer_ = PR_CALLOC(bufLen);
            if (buffer_ != NULL)   // PR_CALLOC can fail; don't copy into NULL
                memcpy(buffer_, buffer, bufLen);
        }
        else
        {
            buffer_ = 0;
        }
    }
    else
    {
        buffer_ = const_cast <void*>(buffer);
    }
    nSize_ = bufLen;
}
Example #18
PR_IMPLEMENT(PRStatus) PR_SetThreadExit(PRUintn index, PRThreadExit func, void *arg)
{
    _PRPerThreadExit *pte;
    PRThread *thread = _PR_MD_CURRENT_THREAD();

    if (index >= thread->numExits) {
	if (thread->ptes) {
	    thread->ptes = (_PRPerThreadExit*)
		PR_REALLOC(thread->ptes, (index+1) * sizeof(_PRPerThreadExit));
	    if (thread->ptes) {
		/* unlike PR_CALLOC below, PR_REALLOC leaves the newly
		** extended slots uninitialized, so clear them */
		memset(&thread->ptes[thread->numExits], 0,
		    (index + 1 - thread->numExits) * sizeof(_PRPerThreadExit));
	    }
	} else {
	    thread->ptes = (_PRPerThreadExit*)
	        PR_CALLOC((index+1) * sizeof(_PRPerThreadExit));
	}
	if (!thread->ptes) {
	    PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
	    return PR_FAILURE;
	}
	thread->numExits = index + 1;
    }
    pte = &thread->ptes[index];
    pte->func = func;
    pte->arg = arg;
    return PR_SUCCESS;
}
Example #19
void _PR_InitGC(PRWord flags)
{
    static char firstTime = 1;

    if (!firstTime) return;
    firstTime = 0;

    _MD_InitGC();

    if (flags == 0) {
	char *ev = PR_GetEnv("GCLOG");
	if (ev && ev[0]) {
	    flags = atoi(ev);
	}
    }
    _pr_gcData.flags = flags;

    _pr_gcData.lock = PR_NewMonitor();

    _pr_collectorTypes = (CollectorType*) PR_CALLOC(256 * sizeof(CollectorType));

    PR_RegisterRootFinder(ScanThreads, "scan threads", 0);
    PR_RegisterRootFinder(_PR_ScanFinalQueue, "scan final queue", 0);
}
void 
CondVarMixedTest(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *ptcount, *saved_ptcount;

    exitcount=0;
    tcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    saved_ptcount = ptcount = (PRInt32 *)PR_CALLOC(sizeof(*ptcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg*4; ) {
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_LOCAL_THREAD);
        index++;
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_GLOBAL_THREAD);
        index++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_LOCAL_THREAD);
        index++;
	ptcount++;

        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_GLOBAL_THREAD);
        index++;
	ptcount++;
    }


    /* Notify every 3rd thread */
    for (loops = 0; loops < count; loops++) {

        /* Notify the threads */
        for(index=0; index<(arg*4); index+=3) {

            PR_Lock(list[index].lock);
            (*list[index].tcount)++;  /* increment the count, not the pointer */
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);

        }
        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while(exitcount < arg*4)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg*4);
        exitcount -= arg*4;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for(index=0; index<(arg*4); index++) {
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
    PR_DELETE(saved_ptcount);
}
void 
CondVarTest(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *ptcount, *saved_ptcount;

    exitcount=0;
    tcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    saved_ptcount = ptcount = (PRInt32 *)PR_CALLOC(sizeof(*ptcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg*4; ) {
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_LOCAL_THREAD);

        index++;
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_GLOBAL_THREAD);

        index++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_LOCAL_THREAD);
        index++;
	ptcount++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_GLOBAL_THREAD);

        index++;
	ptcount++;
    }

    for (loops = 0; loops < count; loops++) {

        /* Notify the threads */
        for(index=0; index<(arg*4); index++) {
            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
        }

#if 0
        printf("wait for threads done\n");
#endif

        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while(exitcount < arg*4)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg*4);
        exitcount -= arg*4;
        PR_Unlock(exitlock);
#if 0
        printf("threads ready\n");
#endif
    }

    /* Join all the threads */
    for(index=0; index<(arg*4); index++) {
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
    PR_DELETE(saved_ptcount);
}
Example #22
#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
int select(int width, void *rl, void *wl, void *el, struct timeval *tv)
#else
int select(int width, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *tv)
#endif
{
    int osfd;
    _PRUnixPollDesc *unixpds, *unixpd, *eunixpd;
    PRInt32 pdcnt;
    PRIntervalTime timeout;
    int retVal;
#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
    fd_set *rd = (fd_set*) rl;
    fd_set *wr = (fd_set*) wl;
    fd_set *ex = (fd_set*) el;
#endif

#if 0
    /*
     * Easy special case: zero timeout.  Simply call the native
     * select() with no fear of blocking.
     */
    if (tv != NULL && tv->tv_sec == 0 && tv->tv_usec == 0) {
#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
        return _MD_SELECT(width, rl, wl, el, tv);
#else
        return _MD_SELECT(width, rd, wr, ex, tv);
#endif
    }
#endif

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
		
#ifndef _PR_LOCAL_THREADS_ONLY
    if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
        return _MD_SELECT(width, rd, wr, ex, tv);	
    }
#endif

    if (width < 0 || width > FD_SETSIZE) {
        errno = EINVAL;
        return -1;
    }

    /* Compute timeout */
    if (tv) {
        /*
         * These acceptable ranges for t_sec and t_usec are taken
         * from the select() man pages.
         */
        if (tv->tv_sec < 0 || tv->tv_sec > 100000000
                || tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
            errno = EINVAL;
            return -1;
        }

        /* Convert microseconds to ticks */
        timeout = PR_MicrosecondsToInterval(1000000*tv->tv_sec + tv->tv_usec);
    } else {
        /* tv being a NULL pointer means blocking indefinitely */
        timeout = PR_INTERVAL_NO_TIMEOUT;
    }

    /* Check for no descriptors case (just doing a timeout) */
    if ((!rd && !wr && !ex) || !width) {
        PR_Sleep(timeout);
        return 0;
    }

    /*
     * Set up for PR_Poll().  The PRPollDesc array is allocated
     * dynamically.  If this turns out to have high performance
     * penalty, one can change to use a large PRPollDesc array
     * on the stack, and allocate dynamically only when it turns
     * out to be not large enough.
     *
     * I allocate an array of size 'width', which is the maximum
     * number of fds we may need to poll.
     */
    unixpds = (_PRUnixPollDesc *) PR_CALLOC(width * sizeof(_PRUnixPollDesc));
    if (!unixpds) {
        errno = ENOMEM;
        return -1;
    }

    pdcnt = 0;
    unixpd = unixpds;
    for (osfd = 0; osfd < width; osfd++) {
        int in_flags = 0;
        if (rd && FD_ISSET(osfd, rd)) {
            in_flags |= _PR_UNIX_POLL_READ;
        }
        if (wr && FD_ISSET(osfd, wr)) {
            in_flags |= _PR_UNIX_POLL_WRITE;
        }
        if (ex && FD_ISSET(osfd, ex)) {
            in_flags |= _PR_UNIX_POLL_EXCEPT;
        }
        if (in_flags) {
            unixpd->osfd = osfd;
            unixpd->in_flags = in_flags;
            unixpd->out_flags = 0;
            unixpd++;
            pdcnt++;
        }
    }

    /*
     * see comments in mozilla/cmd/xfe/mozilla.c (look for
     * "PR_XGetXtHackFD")
     */
   {
     int needToLockXAgain;
 
     needToLockXAgain = 0;
     if (rd && (_pr_xt_hack_fd != -1)
             && FD_ISSET(_pr_xt_hack_fd, rd) && PR_XIsLocked()
             && (!_pr_xt_hack_okayToReleaseXLock
             || _pr_xt_hack_okayToReleaseXLock())) {
         PR_XUnlock();
         needToLockXAgain = 1;
     }

    /* This is the potentially blocking step */
    retVal = _PR_WaitForMultipleFDs(unixpds, pdcnt, timeout);

     if (needToLockXAgain) {
         PR_XLock();
     }
   }

    if (retVal > 0) {
        /* Compute select results */
        if (rd) ZAP_SET(rd, width);
        if (wr) ZAP_SET(wr, width);
        if (ex) ZAP_SET(ex, width);

        /*
         * The return value can be either the number of ready file
         * descriptors or the number of set bits in the three fd_set's.
         */
        retVal = 0;  /* we're going to recompute */
        eunixpd = unixpds + pdcnt;
        for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
            if (unixpd->out_flags) {
                int nbits = 0;  /* The number of set bits on for this fd */

                if (unixpd->out_flags & _PR_UNIX_POLL_NVAL) {
                    errno = EBADF;
                    PR_LOG(_pr_io_lm, PR_LOG_ERROR,
                            ("select returns EBADF for %d", unixpd->osfd));
                    retVal = -1;
                    break;
                }
                /*
                 * If a socket has a pending error, it is considered
                 * both readable and writable.  (See W. Richard Stevens,
                 * Unix Network Programming, Vol. 1, 2nd Ed., Section 6.3,
                 * pp. 153-154.)  We also consider a socket readable if
                 * it has a hangup condition.
                 */
                if (rd && (unixpd->in_flags & _PR_UNIX_POLL_READ)
                        && (unixpd->out_flags & (_PR_UNIX_POLL_READ
                        | _PR_UNIX_POLL_ERR | _PR_UNIX_POLL_HUP))) {
                    FD_SET(unixpd->osfd, rd);
                    nbits++;
                }
                if (wr && (unixpd->in_flags & _PR_UNIX_POLL_WRITE)
                        && (unixpd->out_flags & (_PR_UNIX_POLL_WRITE
                        | _PR_UNIX_POLL_ERR))) {
                    FD_SET(unixpd->osfd, wr);
                    nbits++;
                }
                if (ex && (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT)
                        && (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT)) {
                    FD_SET(unixpd->osfd, ex);
                    nbits++;
                }
                PR_ASSERT(nbits > 0);
#if defined(HPUX) || defined(SOLARIS) || defined(SUNOS4) || defined(OSF1) || defined(AIX)
                retVal += nbits;
#else /* IRIX */
                retVal += 1;
#endif
            }
        }
    }

    PR_ASSERT(tv || retVal != 0);
    PR_LOG(_pr_io_lm, PR_LOG_MIN, ("select returns %d", retVal));
    PR_DELETE(unixpds);

    return retVal;
}
static PRInt32 PR_CALLBACK SocketWritev(PRFileDesc *fd, const PRIOVec *iov,
PRInt32 iov_size, PRIntervalTime timeout)
{
	PRThread *me = _PR_MD_CURRENT_THREAD();
	int w = 0;
	const PRIOVec *tmp_iov;
#define LOCAL_MAXIOV    8
	PRIOVec local_iov[LOCAL_MAXIOV];
	PRIOVec *iov_copy = NULL;
	int tmp_out;
	int index, iov_cnt;
	int count=0, sz = 0;    /* 'count' is the return value. */

	if (_PR_PENDING_INTERRUPT(me)) {
		me->flags &= ~_PR_INTERRUPT;
		PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
		return -1;
	}
	if (_PR_IO_PENDING(me)) {
		PR_SetError(PR_IO_PENDING_ERROR, 0);
		return -1;
	}

    /*
     * Assume the first writev will succeed.  Copy iov's only on
     * failure.
     */
    tmp_iov = iov;
    for (index = 0; index < iov_size; index++)
        sz += iov[index].iov_len;

	iov_cnt = iov_size;

	while (sz > 0) {

		w = _PR_MD_WRITEV(fd, tmp_iov, iov_cnt, timeout);
		if (w < 0) {
			count = -1;
			break;
		}
		count += w;
		if (fd->secret->nonblocking) {
			break;
		}
		sz -= w;

		if (sz > 0) {
			/* find the next unwritten vector */
			for ( index = 0, tmp_out = count;
				tmp_out >= iov[index].iov_len;
				tmp_out -= iov[index].iov_len, index++){;} /* nothing to execute */

			if (tmp_iov == iov) {
				/*
				 * The first writev failed so we
				 * must copy iov's around.
				 * Avoid calloc/free if there
				 * are few enough iov's.
				 */
				if (iov_size - index <= LOCAL_MAXIOV)
					iov_copy = local_iov;
				else if ((iov_copy = (PRIOVec *) PR_CALLOC((iov_size - index) *
					sizeof *iov_copy)) == NULL) {
					PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
					return -1;
				}
				tmp_iov = iov_copy;
			}

			PR_ASSERT(tmp_iov == iov_copy);

			/* fill in the first partially written vector */
			iov_copy[0].iov_base = &(((char *)iov[index].iov_base)[tmp_out]);
			iov_copy[0].iov_len = iov[index].iov_len - tmp_out;
			index++;

			/* copy the remaining vectors */
			for (iov_cnt=1; index<iov_size; iov_cnt++, index++) {
				iov_copy[iov_cnt].iov_base = iov[index].iov_base;
				iov_copy[iov_cnt].iov_len = iov[index].iov_len;
			}
		}
	}

	if (iov_copy != local_iov)
		PR_DELETE(iov_copy);
	return count;
}
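SocketWritev above is the method slot behind PR_Writev. A small caller-side sketch of a gather write; the two fragments and their lengths are illustrative:

#include "nspr.h"

PRInt32 send_greeting(PRFileDesc *fd)
{
    PRIOVec iov[2];
    iov[0].iov_base = (char *)"HELO ";
    iov[0].iov_len  = 5;
    iov[1].iov_base = (char *)"example\r\n";
    iov[1].iov_len  = 9;
    /* both vectors in one call; short writes are retried internally */
    return PR_Writev(fd, iov, 2, PR_INTERVAL_NO_TIMEOUT);
}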
int main(int argc, char** argv)
{
    PRUintn index;
    PRBool boolean;
    CSClient_t *client;
    PRStatus rv, joinStatus;
    CSServer_t *server = NULL;

    PRUintn backlog = DEFAULT_BACKLOG;
    PRUintn clients = DEFAULT_CLIENTS;
    const char *serverName = DEFAULT_SERVER;
    PRBool serverIsLocal = PR_TRUE;
    PRUintn accepting = ALLOWED_IN_ACCEPT;
    PRUintn workersMin = DEFAULT_WORKERS_MIN;
    PRUintn workersMax = DEFAULT_WORKERS_MAX;
    PRIntn execution = DEFAULT_EXECUTION_TIME;
    PRIntn low = DEFAULT_LOW, high = DEFAULT_HIGH;

    /*
     * -G           use global threads
     * -a <n>       threads allowed in accept
     * -b <n>       backlog for listen
     * -c <threads> number of clients to create
     * -f <low>     low water mark for caching FDs
     * -F <high>    high water mark for caching FDs
     * -w <threads> minimal number of server threads
     * -W <threads> maximum number of server threads
     * -e <seconds> duration of the test in seconds
     * -s <string>  dns name of server (implies no server here)
     * -v           verbosity
     */

    PLOptStatus os;
    PLOptState *opt = PL_CreateOptState(argc, argv, "GX6b:a:c:f:F:w:W:e:s:vdhp");

    debug_out = PR_GetSpecialFD(PR_StandardError);

    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'G':  /* use global threads */
            thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'X':  /* use XTP as transport */
            protocol = 36;
            break;
        case '6':  /* Use IPv6 */
            domain = PR_AF_INET6;
            break;
        case 'a':  /* the value for accepting */
            accepting = atoi(opt->value);
            break;
        case 'b':  /* the value for backlog */
            backlog = atoi(opt->value);
            break;
        case 'c':  /* number of client threads */
            clients = atoi(opt->value);
            break;
        case 'f':  /* low water fd cache */
            low = atoi(opt->value);
            break;
        case 'F':  /* high water fd cache */
            high = atoi(opt->value);
            break;
        case 'w':  /* minimum server worker threads */
            workersMin = atoi(opt->value);
            break;
        case 'W':  /* maximum server worker threads */
            workersMax = atoi(opt->value);
            break;
        case 'e':  /* program execution time in seconds */
            execution = atoi(opt->value);
            break;
        case 's':  /* server's address */
            serverName = opt->value;
            break;
        case 'v':  /* verbosity */
            verbosity = IncrementVerbosity();
            break;
        case 'd':  /* debug mode */
            debug_mode = PR_TRUE;
            break;
        case 'p':  /* pthread mode */
            pthread_stats = PR_TRUE;
            break;
        case 'h':
        default:
            Help();
            return 2;
        }
    }
    PL_DestroyOptState(opt);

    if (0 != PL_strcmp(serverName, DEFAULT_SERVER)) serverIsLocal = PR_FALSE;
    if (0 == execution) execution = DEFAULT_EXECUTION_TIME;
    if (0 == workersMax) workersMax = DEFAULT_WORKERS_MAX;
    if (0 == workersMin) workersMin = DEFAULT_WORKERS_MIN;
    if (0 == accepting) accepting = ALLOWED_IN_ACCEPT;
    if (0 == backlog) backlog = DEFAULT_BACKLOG;

    if (workersMin > accepting) accepting = workersMin;

    PR_STDIO_INIT();
    TimeOfDayMessage("Client/Server started at", PR_GetCurrentThread());

    cltsrv_log_file = PR_NewLogModule("cltsrv_log");
    MY_ASSERT(NULL != cltsrv_log_file);
    boolean = PR_SetLogFile("cltsrv.log");
    MY_ASSERT(boolean);

    rv = PR_SetFDCacheSize(low, high);
    PR_ASSERT(PR_SUCCESS == rv);

    if (serverIsLocal)
    {
        /* Establish the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_INFO,
            ("main(0x%p): starting server\n", PR_GetCurrentThread()));

        server = PR_NEWZAP(CSServer_t);
        PR_INIT_CLIST(&server->list);
        server->state = cs_init;
        server->ml = PR_NewLock();
        server->backlog = backlog;
        server->port = DEFAULT_PORT;
        server->workers.minimum = workersMin;
        server->workers.maximum = workersMax;
        server->workers.accepting = accepting;
        server->stateChange = PR_NewCondVar(server->ml);
        server->pool.exiting = PR_NewCondVar(server->ml);
        server->pool.acceptComplete = PR_NewCondVar(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("main(0x%p): creating server thread\n", PR_GetCurrentThread()));

        server->thread = PR_CreateThread(
            PR_USER_THREAD, Server, server, PR_PRIORITY_HIGH,
            thread_scope, PR_JOINABLE_THREAD, 0);
        TEST_ASSERT(NULL != server->thread);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): waiting for server init\n", PR_GetCurrentThread()));

        PR_Lock(server->ml);
        while (server->state == cs_init)
            PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): server init complete (port #%d)\n",
            PR_GetCurrentThread(), server->port));
    }

    if (clients != 0)
    {
        /* Create all of the clients */
        PRHostEnt host;
        char buffer[BUFFER_SIZE];
        client = (CSClient_t*)PR_CALLOC(clients * sizeof(CSClient_t));

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): creating %d client threads\n",
            PR_GetCurrentThread(), clients));
        
        if (!serverIsLocal)
        {
            rv = PR_GetHostByName(serverName, buffer, BUFFER_SIZE, &host);
            if (PR_SUCCESS != rv)
            {
                PL_FPrintError(PR_STDERR, "PR_GetHostByName");
                return 2;
            }
        }

        for (index = 0; index < clients; ++index)
        {
            client[index].state = cs_init;
            client[index].ml = PR_NewLock();
            if (serverIsLocal)
            {
				if (PR_AF_INET6 != domain)
                	(void)PR_InitializeNetAddr(
                    	PR_IpAddrLoopback, DEFAULT_PORT,
                    	&client[index].serverAddress);
				else
					rv = PR_SetNetAddr(PR_IpAddrLoopback, PR_AF_INET6,
							DEFAULT_PORT, &client[index].serverAddress);
            }
            else
            {
                (void)PR_EnumerateHostEnt(
                    0, &host, DEFAULT_PORT, &client[index].serverAddress);
            }
            client[index].stateChange = PR_NewCondVar(client[index].ml);
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_INFO,
                ("main(0x%p): creating client threads\n", PR_GetCurrentThread()));
            client[index].thread = PR_CreateThread(
                PR_USER_THREAD, Client, &client[index], PR_PRIORITY_NORMAL,
                thread_scope, PR_JOINABLE_THREAD, 0);
            TEST_ASSERT(NULL != client[index].thread);
            PR_Lock(client[index].ml);
            while (cs_init == client[index].state)
                PR_WaitCondVar(client[index].stateChange, PR_INTERVAL_NO_TIMEOUT);
            PR_Unlock(client[index].ml);
        }
    }

    /* Then just let them go at it for a bit */
    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("main(0x%p): waiting for execution interval (%d seconds)\n",
        PR_GetCurrentThread(), execution));

    WaitForCompletion(execution);

    TimeOfDayMessage("Shutting down", PR_GetCurrentThread());

    if (clients != 0)
    {
        for (index = 0; index < clients; ++index)
        {
            TEST_LOG(cltsrv_log_file, TEST_LOG_STATUS, 
                ("main(0x%p): notifying client(0x%p) to stop\n",
                PR_GetCurrentThread(), client[index].thread));

            PR_Lock(client[index].ml);
            if (cs_run == client[index].state)
            {
                client[index].state = cs_stop;
                PR_Interrupt(client[index].thread);
                while (cs_stop == client[index].state)
                    PR_WaitCondVar(
                        client[index].stateChange, PR_INTERVAL_NO_TIMEOUT);
            }
            PR_Unlock(client[index].ml);

            TEST_LOG(cltsrv_log_file, TEST_LOG_VERBOSE, 
                ("main(0x%p): joining client(0x%p)\n",
                PR_GetCurrentThread(), client[index].thread));

		    joinStatus = PR_JoinThread(client[index].thread);
		    TEST_ASSERT(PR_SUCCESS == joinStatus);
            PR_DestroyCondVar(client[index].stateChange);
            PR_DestroyLock(client[index].ml);
        }
        PR_DELETE(client);
    }

    if (NULL != server)
    {
        /* All clients joined - retrieve the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE, 
            ("main(0x%p): notifying server(0x%p) to stop\n",
            PR_GetCurrentThread(), server->thread));

        PR_Lock(server->ml);
        server->state = cs_stop;
        PR_Interrupt(server->thread);
        while (cs_exit != server->state)
            PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE, 
            ("main(0x%p): joining server(0x%p)\n",
            PR_GetCurrentThread(), server->thread));
        joinStatus = PR_JoinThread(server->thread);
        TEST_ASSERT(PR_SUCCESS == joinStatus);

        PR_DestroyCondVar(server->stateChange);
        PR_DestroyCondVar(server->pool.exiting);
        PR_DestroyCondVar(server->pool.acceptComplete);
        PR_DestroyLock(server->ml);
        PR_DELETE(server);
    }

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS, 
        ("main(0x%p): test complete\n", PR_GetCurrentThread()));

    PT_FPrintStats(debug_out, "\nPThread Statistics\n");

    TimeOfDayMessage("Test exiting at", PR_GetCurrentThread());
    PR_Cleanup();
    return 0;
}  /* main */
Example #25
static PRStatus _MW_PollInternal(PRWaitGroup *group)
{
    PRRecvWait **waiter;
    PRStatus rv = PR_FAILURE;
    PRUintn count;
    PRInt32 count_ready;  /* PR_Poll() returns PRInt32 and may be -1 */
    PRIntervalTime polling_interval;

    group->poller = PR_GetCurrentThread();

    PR_Unlock(group->ml);

    while (PR_TRUE)
    {
        PRIntervalTime now, since_last_poll;
        PRPollDesc *poll_list = group->polling_list;
        /*
        ** There's something to do. See if our existing polling list
        ** is large enough for what we have to do?
        */

        while (group->polling_count < group->waiter->count)
        {
            PRUint32 old_count = group->waiter->count;
            PRUint32 new_count = PR_ROUNDUP(old_count, _PR_POLL_COUNT_FUDGE);
            PRSize new_size = sizeof(PRPollDesc) * new_count;
            poll_list = (PRPollDesc*)PR_CALLOC(new_size);
            if (NULL == poll_list) goto failed_alloc;
            if (NULL != group->polling_list)
                PR_DELETE(group->polling_list);
            group->polling_list = poll_list;
            group->polling_count = new_count;
        }

        now = PR_IntervalNow();
        polling_interval = max_polling_interval;
        since_last_poll = now - group->last_poll;
        PR_Lock(group->ml);
        waiter = &group->waiter->recv_wait;

        for (count = 0; count < group->waiter->count; ++waiter)
        {
            if (NULL != *waiter)  /* a live one! */
            {
                if (since_last_poll >= (*waiter)->timeout)
                    _MW_DoneInternal(group, waiter, PR_MW_TIMEOUT);
                else
                {
                    if (PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout)
                    {
                        (*waiter)->timeout -= since_last_poll;
                        if ((*waiter)->timeout < polling_interval)
                            polling_interval = (*waiter)->timeout;
                    }
                    poll_list->fd = (*waiter)->fd;
                    poll_list->in_flags = PR_POLL_READ;
                    poll_list->out_flags = 0;
#if 0
                    printf(
                        "Polling 0x%x[%d]: [fd: 0x%x, tmo: %u]\n",
                        poll_list, count, poll_list->fd, (*waiter)->timeout);
#endif
                    poll_list += 1;
                    count += 1;
                }
            }
        } 

        PR_ASSERT(count == group->waiter->count);
        if (0 == count) break;

        group->last_poll = now;

        PR_Unlock(group->ml);

        count_ready = PR_Poll(group->polling_list, count, polling_interval);

        PR_Lock(group->ml);

        if (-1 == count_ready) goto failed_poll;  /* that's a shame */
        for (poll_list = group->polling_list; count > 0; poll_list++, count--)
        {
            if (poll_list->out_flags != 0)
            {
                waiter = _MW_LookupInternal(group, poll_list->fd);
                if (NULL != waiter)
                    _MW_DoneInternal(group, waiter, PR_MW_SUCCESS);
            }
        }
        /*
        ** If there are no more threads waiting for completion,
        ** we need to return.
        ** This thread was "borrowed" to do the polling, but it really
        ** belongs to the client.
        */
        if ((_prmw_running != group->state)
        || (0 == group->waiting_threads)) break;
        PR_Unlock(group->ml);
    }

    rv = PR_SUCCESS;

failed_poll:
failed_alloc:
    group->poller = NULL;  /* we were that, now we ain't */
    return rv;  /* we return with the lock held */
}  /* _MW_PollInternal */
Example #26
nsresult nsMsgMdnGenerator::CreateFirstPart()
{
    DEBUG_MDN("nsMsgMdnGenerator::CreateFirstPart");
    char *convbuf = nsnull, *tmpBuffer = nsnull;
    char *parm = nsnull;
    nsString firstPart1;
    nsString firstPart2;
    nsresult rv = NS_OK;
    nsCOMPtr <nsIMsgCompUtils> compUtils;

    if (m_mimeSeparator.IsEmpty())
    {
      compUtils = do_GetService(NS_MSGCOMPUTILS_CONTRACTID, &rv);
      NS_ENSURE_SUCCESS(rv, rv);
      rv = compUtils->MimeMakeSeparator("mdn", getter_Copies(m_mimeSeparator));
      NS_ENSURE_SUCCESS(rv, rv);
    }
    if (m_mimeSeparator.IsEmpty())
      return NS_ERROR_OUT_OF_MEMORY;

    tmpBuffer = (char *) PR_CALLOC(256);

    if (!tmpBuffer)
        return NS_ERROR_OUT_OF_MEMORY;

    PRExplodedTime now;
    PR_ExplodeTime(PR_Now(), PR_LocalTimeParameters, &now);

    int gmtoffset = (now.tm_params.tp_gmt_offset + now.tm_params.tp_dst_offset)
        / 60;
  /* Use PR_FormatTimeUSEnglish() to format the date in US English format,
     then figure out what our local GMT offset is, and append it (since
     PR_FormatTimeUSEnglish() can't do that.) Generate four digit years as
     per RFC 1123 (superseding RFC 822.)
  */
    PR_FormatTimeUSEnglish(tmpBuffer, 100,
                           "Date: %a, %d %b %Y %H:%M:%S ",
                           &now);

    PR_snprintf(tmpBuffer + strlen(tmpBuffer), 100,
                "%c%02d%02d" CRLF,
                (gmtoffset >= 0 ? '+' : '-'),
                ((gmtoffset >= 0 ? gmtoffset : -gmtoffset) / 60),
                ((gmtoffset >= 0 ? gmtoffset : -gmtoffset) % 60));

    rv = WriteString(tmpBuffer);
    PR_Free(tmpBuffer);
    if (NS_FAILED(rv))
        return rv;

    PRBool conformToStandard = PR_FALSE;
    if (compUtils)
      compUtils->GetMsgMimeConformToStandard(&conformToStandard);

    nsString fullName;
    m_identity->GetFullName(fullName);

    nsCString fullAddress;
    nsCOMPtr<nsIMsgHeaderParser> parser (do_GetService(NS_MAILNEWS_MIME_HEADER_PARSER_CONTRACTID));
    if (parser)
    {
        // convert fullName to UTF8 before passing it to MakeFullAddressString
        parser->MakeFullAddressString(NS_ConvertUTF16toUTF8(fullName).get(),
                                      m_email.get(), getter_Copies(fullAddress));
    }

    convbuf = nsMsgI18NEncodeMimePartIIStr(
        (!fullAddress.IsEmpty()) ? fullAddress.get(): m_email.get(),
        PR_TRUE, m_charset.get(), 0, conformToStandard);

    parm = PR_smprintf("From: %s" CRLF, convbuf ? convbuf : m_email.get());

    rv = FormatStringFromName(NS_LITERAL_STRING("MsgMdnMsgSentTo").get(), NS_ConvertASCIItoUTF16(m_email).get(),
                            getter_Copies(firstPart1));
    if (NS_FAILED(rv))
        return rv;

    PUSH_N_FREE_STRING (parm);

    PR_Free(convbuf);

    if (compUtils)
    {
      nsCString msgId;
      rv = compUtils->MsgGenerateMessageId(m_identity, getter_Copies(msgId));
      tmpBuffer = PR_smprintf("Message-ID: %s" CRLF, msgId.get());
      PUSH_N_FREE_STRING(tmpBuffer);
    }

    nsString receipt_string;
    switch (m_disposeType)
    {
    case nsIMsgMdnGenerator::eDisplayed:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MdnDisplayedReceipt").get(),
            getter_Copies(receipt_string));
        break;
    case nsIMsgMdnGenerator::eDispatched:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MdnDispatchedReceipt").get(),
            getter_Copies(receipt_string));
        break;
    case nsIMsgMdnGenerator::eProcessed:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MdnProcessedReceipt").get(),
            getter_Copies(receipt_string));
        break;
    case nsIMsgMdnGenerator::eDeleted:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MdnDeletedReceipt").get(),
            getter_Copies(receipt_string));
        break;
    case nsIMsgMdnGenerator::eDenied:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MdnDeniedReceipt").get(),
            getter_Copies(receipt_string));
        break;
    case nsIMsgMdnGenerator::eFailed:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MdnFailedReceipt").get(),
            getter_Copies(receipt_string));
        break;
    default:
        rv = NS_ERROR_INVALID_ARG;
        break;
    }

    if (NS_FAILED(rv))
        return rv;

    receipt_string.AppendLiteral(" - ");

    char * encodedReceiptString = nsMsgI18NEncodeMimePartIIStr(NS_ConvertUTF16toUTF8(receipt_string).get(), PR_FALSE,
                                                               "UTF-8", 0, conformToStandard);

    nsCString subject;
    m_headers->ExtractHeader(HEADER_SUBJECT, PR_FALSE, getter_Copies(subject));
    convbuf = nsMsgI18NEncodeMimePartIIStr(subject.Length() ? subject.get() : "[no subject]",
                                           PR_FALSE, m_charset.get(), 0, conformToStandard);
    tmpBuffer = PR_smprintf("Subject: %s%s" CRLF,
                             encodedReceiptString,
                            (convbuf ? convbuf : (subject.Length() ? subject.get() :
                              "[no subject]")));

    PUSH_N_FREE_STRING(tmpBuffer);
    PR_Free(convbuf);
    PR_Free(encodedReceiptString);

    convbuf = nsMsgI18NEncodeMimePartIIStr(m_dntRrt.get(), PR_TRUE, m_charset.get(), 0, conformToStandard);
    tmpBuffer = PR_smprintf("To: %s" CRLF, convbuf ? convbuf :
                            m_dntRrt.get());
    PUSH_N_FREE_STRING(tmpBuffer);

    PR_Free(convbuf);

  // *** This is not in the spec. I am adding this so we could do
  // threading
    m_headers->ExtractHeader(HEADER_MESSAGE_ID, PR_FALSE,
                             getter_Copies(m_messageId));

    if (!m_messageId.IsEmpty())
    {
      if (*m_messageId.get() == '<')
          tmpBuffer = PR_smprintf("References: %s" CRLF, m_messageId.get());
      else
          tmpBuffer = PR_smprintf("References: <%s>" CRLF, m_messageId.get());
      PUSH_N_FREE_STRING(tmpBuffer);
    }
    tmpBuffer = PR_smprintf("%s" CRLF, "MIME-Version: 1.0");
    PUSH_N_FREE_STRING(tmpBuffer);

    tmpBuffer = PR_smprintf("Content-Type: multipart/report; \
report-type=disposition-notification;\r\n\tboundary=\"%s\"" CRLF CRLF,
                            m_mimeSeparator.get());
    PUSH_N_FREE_STRING(tmpBuffer);

    tmpBuffer = PR_smprintf("--%s" CRLF, m_mimeSeparator.get());
    PUSH_N_FREE_STRING(tmpBuffer);

    tmpBuffer = PR_smprintf("Content-Type: text/plain; charset=UTF-8" CRLF);
    PUSH_N_FREE_STRING(tmpBuffer);

    tmpBuffer = PR_smprintf("Content-Transfer-Encoding: %s" CRLF CRLF,
                            ENCODING_8BIT);
    PUSH_N_FREE_STRING(tmpBuffer);

    if (!firstPart1.IsEmpty())
    {
        tmpBuffer = PR_smprintf("%s" CRLF CRLF, NS_ConvertUTF16toUTF8(firstPart1).get());
        PUSH_N_FREE_STRING(tmpBuffer);
    }

    switch (m_disposeType)
    {
    case nsIMsgMdnGenerator::eDisplayed:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MsgMdnDisplayed").get(),
            getter_Copies(firstPart2));
        break;
    case nsIMsgMdnGenerator::eDispatched:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MsgMdnDispatched").get(),
            getter_Copies(firstPart2));
        break;
    case nsIMsgMdnGenerator::eProcessed:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MsgMdnProcessed").get(),
            getter_Copies(firstPart2));
        break;
    case nsIMsgMdnGenerator::eDeleted:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MsgMdnDeleted").get(),
            getter_Copies(firstPart2));
        break;
    case nsIMsgMdnGenerator::eDenied:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MsgMdnDenied").get(),
            getter_Copies(firstPart2));
        break;
    case nsIMsgMdnGenerator::eFailed:
        rv = GetStringFromName(
            NS_LITERAL_STRING("MsgMdnFailed").get(),
            getter_Copies(firstPart2));
        break;
    default:
        rv = NS_ERROR_INVALID_ARG;
        break;
    }

    if (NS_FAILED(rv))
        return rv;

    if (!firstPart2.IsEmpty())
    {
        tmpBuffer =
            PR_smprintf("%s" CRLF CRLF,
                        NS_ConvertUTF16toUTF8(firstPart2).get());
        PUSH_N_FREE_STRING(tmpBuffer);
    }

    return rv;
}
nsresult
nsMsgAttachmentHandler::ConvertToAppleEncoding(const nsCString &aFileURI, 
                                               const nsCString &aFilePath, 
                                               nsILocalFileMac *aSourceFile)
{
  // convert the apple file to AppleDouble first, and then patch the
  // address in the url.
  
  //We need to retrieve the file type and creator...

  char fileInfo[32];
  OSType type, creator;

  nsresult rv = aSourceFile->GetFileType(&type);
  if (NS_FAILED(rv))
    return rv;  /* propagate the nsresult, not a bool */
  PR_snprintf(fileInfo, sizeof(fileInfo), "%X", type);
  m_xMacType = fileInfo;

  rv = aSourceFile->GetFileCreator(&creator);
  if (NS_FAILED(rv))
    return rv;
  PR_snprintf(fileInfo, sizeof(fileInfo), "%X", creator);
  m_xMacCreator = fileInfo;

  FSRef fsRef;
  aSourceFile->GetFSRef(&fsRef);
  bool sendResourceFork = HasResourceFork(&fsRef);

  // if we have a resource fork, check the filename extension, maybe we don't need the resource fork!
  if (sendResourceFork)
  {
    nsCOMPtr<nsIURL> fileUrl(do_CreateInstance(NS_STANDARDURL_CONTRACTID));
    if (fileUrl)
    {
      rv = fileUrl->SetSpec(aFileURI);
      if (NS_SUCCEEDED(rv))
      {
        nsCAutoString ext;
        rv = fileUrl->GetFileExtension(ext);
        if (NS_SUCCEEDED(rv) && !ext.IsEmpty())
        {
          sendResourceFork =
          PL_strcasecmp(ext.get(), "TXT") &&
          PL_strcasecmp(ext.get(), "JPG") &&
          PL_strcasecmp(ext.get(), "GIF") &&
          PL_strcasecmp(ext.get(), "TIF") &&
          PL_strcasecmp(ext.get(), "HTM") &&
          PL_strcasecmp(ext.get(), "HTML") &&
          PL_strcasecmp(ext.get(), "ART") &&
          PL_strcasecmp(ext.get(), "XUL") &&
          PL_strcasecmp(ext.get(), "XML") &&
          PL_strcasecmp(ext.get(), "CSS") &&
          PL_strcasecmp(ext.get(), "JS");
        }
      }
    }
  }

  // Only use appledouble if we aren't uuencoding.
  if( sendResourceFork )
  {
    char *separator;

    separator = mime_make_separator("ad");
    if (!separator)
      return NS_ERROR_OUT_OF_MEMORY;

    nsCOMPtr <nsIFile> tmpFile;
    nsresult rv = nsMsgCreateTempFile("appledouble", getter_AddRefs(tmpFile));
    if (NS_SUCCEEDED(rv))
      mEncodedWorkingFile = do_QueryInterface(tmpFile);
    if (!mEncodedWorkingFile)
    {
      PR_FREEIF(separator);
      return NS_ERROR_OUT_OF_MEMORY;
    }

    //
    // RICHIE_MAC - ok, here's the deal, we have a file that we need
    // to encode in appledouble encoding for the resource fork and put that
    // into the mEncodedWorkingFile location. Then, we need to patch the new file
    // spec into the array and send this as part of the 2 part appledouble/mime
    // encoded mime part.
    //
    AppleDoubleEncodeObject     *obj = new (AppleDoubleEncodeObject);
    if (obj == NULL)
    {
      mEncodedWorkingFile = nsnull;
      PR_FREEIF(separator);
      return NS_ERROR_OUT_OF_MEMORY;
    }

    rv = MsgGetFileStream(mEncodedWorkingFile, getter_AddRefs(obj->fileStream));
    if (NS_FAILED(rv) || !obj->fileStream)
    {
      PR_FREEIF(separator);
      delete obj;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    PRInt32     bSize = AD_WORKING_BUFF_SIZE;

    char  *working_buff = nsnull;
    while (!working_buff && (bSize >= 512))
    {
      working_buff = (char *)PR_CALLOC(bSize);
      if (!working_buff)
        bSize /= 2;
    }

    if (!working_buff)
    {
      PR_FREEIF(separator);
      delete obj;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    obj->buff = working_buff;
    obj->s_buff = bSize;

    //
    //  Setup all the need information on the apple double encoder.
    //
    ap_encode_init(&(obj->ap_encode_obj), aFilePath.get(), separator);

    PRInt32 count;

    PRInt32     status = noErr;
    m_size = 0;
    while (status == noErr)
    {
      status = ap_encode_next(&(obj->ap_encode_obj), obj->buff, bSize, &count);
      if (status == noErr || status == errDone)
      {
        //
        // we got the encode data, so call the next stream to write it to the disk.
        //
        PRUint32 bytesWritten;
        obj->fileStream->Write(obj->buff, count, &bytesWritten);
        if (bytesWritten != (PRUint32) count)
          status = NS_MSG_ERROR_WRITING_FILE;
      }
    }

    ap_encode_end(&(obj->ap_encode_obj), (status >= 0)); // true = finished cleanly, false = abort
    if (obj->fileStream)
      obj->fileStream->Close();

    PR_FREEIF(obj->buff);               /* free the working buff.   */
    delete obj;

    nsCOMPtr <nsIURI> fileURI;
    NS_NewFileURI(getter_AddRefs(fileURI), mEncodedWorkingFile);

    nsCOMPtr<nsIFileURL> theFileURL = do_QueryInterface(fileURI, &rv);
    NS_ENSURE_SUCCESS(rv,rv);

    nsCString newURLSpec;
    rv = fileURI->GetSpec(newURLSpec);
    NS_ENSURE_SUCCESS(rv, rv);

    if (newURLSpec.IsEmpty())
    {
      PR_FREEIF(separator);
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (NS_FAILED(nsMsgNewURL(getter_AddRefs(mURL), newURLSpec.get())))
    {
      PR_FREEIF(separator);
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // Now after conversion, also patch the types.
    char        tmp[128];
    PR_snprintf(tmp, sizeof(tmp), MULTIPART_APPLEDOUBLE ";\r\n boundary=\"%s\"", separator);
    PR_FREEIF(separator);
    m_type = tmp;
  }
  else
  {
    // Note: sendResourceFork is always false on this branch, so the block
    // below is unreachable. For now, just do the encoding, but in the old
    // world we would ask the user about doing this conversion.
    if ( sendResourceFork )
    {
      printf("...we could ask the user about this conversion, but for now, nahh..\n");
    }

    bool      useDefault;
    char      *macType, *macEncoding;
    if (m_type.IsEmpty() || m_type.LowerCaseEqualsLiteral(TEXT_PLAIN))
    {
# define TEXT_TYPE  0x54455854  /* the characters 'T' 'E' 'X' 'T' */
# define text_TYPE  0x74657874  /* the characters 't' 'e' 'x' 't' */

      if (type != TEXT_TYPE && type != text_TYPE)
      {
        MacGetFileType(aSourceFile, &useDefault, &macType, &macEncoding);
        m_type = macType;
      }
    }
    // don't bother to set the types if we failed in getting the file info.
  }

  return NS_OK;
}
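/*
 * The chained PL_strcasecmp tests above are a membership check against a
 * fixed list of extensions whose files carry no useful resource fork.  A
 * minimal table-driven sketch of the same test; needsResourceFork is a
 * hypothetical helper name, not part of the original file:
 */
#include "plstr.h"

static PRBool needsResourceFork(const char *ext)
{
    static const char *kDataOnlyExts[] = {
        "TXT", "JPG", "GIF", "TIF", "HTM", "HTML",
        "ART", "XUL", "XML", "CSS", "JS"
    };
    PRUint32 i;
    for (i = 0; i < sizeof(kDataOnlyExts) / sizeof(kDataOnlyExts[0]); i++) {
        if (!PL_strcasecmp(ext, kDataOnlyExts[i]))
            return PR_FALSE;  /* known data-only type: the fork can be dropped */
    }
    return PR_TRUE;           /* unknown type: keep the resource fork */
}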
Example #28
static PRInt32 PR_CALLBACK SocketWritev(PRFileDesc *fd, PRIOVec *iov, PRInt32 iov_size,
                                        PRIntervalTime timeout)
{
	PRThread *me = _PR_MD_CURRENT_THREAD();
	int w = 0;
	PRIOVec *tmp_iov = NULL;
	int tmp_out;
	int index, iov_cnt;
	int count=0, sz = 0;    /* 'count' is the return value. */
#if defined(XP_UNIX)
	struct timeval tv, *tvp;
	fd_set wd;

	FD_ZERO(&wd);
	if (timeout == PR_INTERVAL_NO_TIMEOUT)
		tvp = NULL;
	else if (timeout != PR_INTERVAL_NO_WAIT) {
		tv.tv_sec = PR_IntervalToSeconds(timeout);
		tv.tv_usec = PR_IntervalToMicroseconds(
		    timeout - PR_SecondsToInterval(tv.tv_sec));
		tvp = &tv;
	}
#endif

	if (_PR_PENDING_INTERRUPT(me)) {
		me->flags &= ~_PR_INTERRUPT;
		PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
		return -1;
	}
	if (_PR_IO_PENDING(me)) {
		PR_SetError(PR_IO_PENDING_ERROR, 0);
		return -1;
	}

	tmp_iov = (PRIOVec *)PR_CALLOC(iov_size * sizeof(PRIOVec));
	if (!tmp_iov) {
		PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
		return -1;
	}

	for (index=0; index<iov_size; index++) {
		sz += iov[index].iov_len;
		tmp_iov[index].iov_base = iov[index].iov_base;
		tmp_iov[index].iov_len = iov[index].iov_len;
	}
	iov_cnt = iov_size;

	while (sz > 0) {

		w = _PR_MD_WRITEV(fd, tmp_iov, iov_cnt, timeout);
		if (w < 0) {
			count = -1;
			break;
		}
		count += w;
		if (fd->secret->nonblocking) {
			break;
		}
		sz -= w;

		if (sz > 0) {
			/* find the next unwritten vector */
			for ( index = 0, tmp_out = count;
			    tmp_out >= iov[index].iov_len;
			    tmp_out -= iov[index].iov_len, index++){;} /* nothing to execute */


			/* fill in the first partially written vector */
			tmp_iov[0].iov_base = &(((char *)iov[index].iov_base)[tmp_out]);
			tmp_iov[0].iov_len = iov[index].iov_len - tmp_out;
			index++;

			/* copy the remaining vectors */
			for (iov_cnt=1; index<iov_size; iov_cnt++, index++) {
				tmp_iov[iov_cnt].iov_base = iov[index].iov_base;
				tmp_iov[iov_cnt].iov_len = iov[index].iov_len;
			}
		}
	}

	if (tmp_iov)
		PR_DELETE(tmp_iov);
	return count;
}
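/*
 * The resume logic in SocketWritev can be read as a standalone step: after
 * a short write of `written` bytes, rebuild the scatter list so it starts
 * at the first unwritten byte.  A minimal sketch under that reading;
 * advance_iov is a hypothetical name, and it returns the new vector count:
 */
static int advance_iov(const PRIOVec *src, int n, int written, PRIOVec *tmp)
{
    int i = 0, out = 0;

    /* skip the vectors that were written completely */
    while (i < n && written >= src[i].iov_len)
        written -= src[i++].iov_len;

    /* the current vector may have been written only partially */
    if (i < n) {
        tmp[out].iov_base = (char *)src[i].iov_base + written;
        tmp[out].iov_len = src[i].iov_len - written;
        out++; i++;
    }

    /* copy the untouched remainder verbatim */
    for (; i < n; i++, out++)
        tmp[out] = src[i];

    return out;
}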
void 
CondVarTestPUU(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *tcount, *saved_tcount;

    exitcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    saved_tcount = tcount = (PRInt32 *)PR_CALLOC(sizeof(*tcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg; ) {
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_LOCAL_THREAD);

	DPRINTF(("CondVarTestPUU: created thread 0x%lx\n",list[index].thread));
        index++;
	tcount++;
    }

    for (loops = 0; loops < count; loops++) {
        /* Notify the threads */
        for(index=0; index<(arg); index++) {

            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
        }

        PR_Lock(exitlock);
        /* Wait for threads to finish */
        while (exitcount < arg) {
            DPRINTF(("CondVarTestPUU: thread 0x%lx waiting on exitcvar = 0x%lx cnt = %ld\n",
                     PR_GetCurrentThread(), exitcvar, exitcount));
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        }
        PR_ASSERT(exitcount >= arg);
        exitcount -= arg;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for (index = 0; index < arg; index++) {
        DPRINTF(("CondVarTestPUU: joining thread 0x%lx\n", list[index].thread));
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
    PR_DELETE(saved_tcount);
}
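/*
 * The exit handshake above follows the standard NSPR condition-variable
 * pattern: hold the lock and re-check the predicate after every wake,
 * because PR_WaitCondVar can return on a timeout or a stray notify.  A
 * minimal sketch with illustrative names, not taken from the test:
 */
static void wait_for_count(PRLock *lock, PRCondVar *cvar,
                           PRInt32 *counter, PRInt32 target)
{
    PR_Lock(lock);
    while (*counter < target)
        PR_WaitCondVar(cvar, PR_SecondsToInterval(60));
    PR_Unlock(lock);
}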
int main(int argc, char **argv)
{
    PRStatus rv;
    PLOptStatus os;
    PRUint8 *buffer;
    PRFileDesc *file = NULL;
    const char *filename = "sync.dat";
    PRUint32 index, loops, iterations = 10, filesize = 10;
    PRIntn flags = PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE;
    PLOptState *opt = PL_CreateOptState(argc, argv, "hSK:c:");
    PRIntervalTime time, total = 0, shortest = 0x7fffffff, longest = 0;

    err = PR_GetSpecialFD(PR_StandardError);

    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 0:       /* Name of file to create */
            filename = opt->value;
            break;
        case 'S':       /* Use sync option on file */
            flags |= PR_SYNC;
            break;
        case 'K':       /* Size of file to write */
            filesize = atoi(opt->value);
            break;
        case 'c':       /* Number of iterations */
            iterations = atoi(opt->value);
            break;
        case 'h':       /* user wants some guidance */
        default:        /* user needs some guidance */
            Help();     /* so give him an earful */
            return 2;   /* but not a lot else */
        }
    }
    PL_DestroyOptState(opt);

    file = PR_Open(filename, flags, 0666);
    if (NULL == file)
    {
        PL_FPrintError(err, "Failed to open file");
        return 1;
    }

    buffer = (PRUint8*)PR_CALLOC(1024);
    if (NULL == buffer)
    {
        PL_FPrintError(err, "Cannot allocate buffer");
        return 1;
    }

    for (index = 0; index < 1024; ++index)
        buffer[index] = (PRUint8)index;

    for (loops = 0; loops < iterations; ++loops)
    {
        time = PR_IntervalNow();
        for (index = 0; index < filesize; ++index)
        {
            PR_Write(file, buffer, 1024);
        }
        time = (PR_IntervalNow() - time);

        total += time;
        if (time < shortest) shortest = time;
        if (time > longest) longest = time;
        if (0 != PR_Seek(file, 0, PR_SEEK_SET))
        {
           PL_FPrintError(err, "Rewinding file");
           return 1;
        }
    }

    total = total / iterations;
    PR_fprintf(
        err, "%u iterations over a %u kbyte %sfile: %u [%u] %u\n",
        iterations, filesize, ((flags & PR_SYNC) ? "SYNCH'd " : ""),
        PR_IntervalToMicroseconds(shortest),
        PR_IntervalToMicroseconds(total),
        PR_IntervalToMicroseconds(longest));

    PR_DELETE(buffer);
    rv = PR_Close(file);
    if (PR_SUCCESS != rv)
    {
        PL_FPrintError(err, "Closing file failed");
        return 1;
    }
    rv = PR_Delete(filename);
    if (PR_SUCCESS != rv)
    {
        PL_FPrintError(err, "Deleting file failed");
        return 1;
    }
    return 0;
}
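/*
 * The timing idiom in main(), factored out as a sketch: PRIntervalTime
 * arithmetic is modular, so only differences are meaningful, and the delta
 * is converted to microseconds once at the end.  time_call_usec is an
 * illustrative helper, not part of the original test:
 */
#include "prinrval.h"

static PRUint32 time_call_usec(void (*fn)(void))
{
    PRIntervalTime start = PR_IntervalNow();
    fn();  /* caller-supplied workload */
    return PR_IntervalToMicroseconds((PRIntervalTime)(PR_IntervalNow() - start));
}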