bool SendFileStraight::send() {
	if (m_stream->isOpen()){
		lockMutex();
		std::string data;
		int size = m_parent->getDataToSend(data);
		if (size != 0) {
			int ret = m_stream->send(data);
			if (ret < 1) {
				std::cout << "send failed or transfer already finished\n";
				unlockMutex();
				return false;
			}
		}
		else
			wait();
		unlockMutex();
	}
	else {
		std::cout << "stream not open!\n";
		g_usleep(G_USEC_PER_SEC);
	}
	if (m_stream->recv(2000) != ConnNoError) {
		m_parent->tryToDeleteMe();
// 		g_timeout_add(0, &try_to_delete_me, m_parent);
		return false;
	}
	if (shouldStop()) {
		Log("SendFileStraight", "stopping thread");
		return false;
	}
	
	return true;
}
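
A minimal driver sketch, assuming send() above is meant to be called repeatedly from the owning thread's loop; run() is an illustrative name, not part of the original class:

void SendFileStraight::run() {
	// keep invoking send() until it reports an error, a finished
	// transfer, or a stop request
	while (send())
		;
}
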
void
ArchMultithreadWindows::doThreadFunc(ArchThread thread)
{
    // wait for parent to initialize this object
    lockMutex(m_threadMutex);
    unlockMutex(m_threadMutex);

    void* result = NULL;
    try {
        // go
        result = (*thread->m_func)(thread->m_userData);
    }

    catch (XThreadCancel&) {
        // client called cancel()
    }
    catch (...) {
        // catch everything else, but rethrow after cleanup so bugs aren't masked
        SetEvent(thread->m_exit);
        closeThread(thread);
        throw;
    }

    // thread has exited
    lockMutex(m_threadMutex);
    thread->m_result = result;
    unlockMutex(m_threadMutex);
    SetEvent(thread->m_exit);

    // done with thread
    closeThread(thread);
}
Example #3
void AutoFreePool_addCallback(void (* callback)(void * context), void * context) {
    if (mutex != NULL) {
        lockMutex(mutex);
    }

    if (poolStackDepth == -1) {
        if (mutex != NULL) {
            unlockMutex(mutex);
        }
        AutoFreePool_push();
        if (mutex != NULL) {
            lockMutex(mutex);
        }
    }

    if (poolStack[poolStackDepth].callbacks == NULL) {
        poolStack[poolStackDepth].numberOfCallbacks = 0;
        poolStack[poolStackDepth].callbackListSize = 1;
        poolStack[poolStackDepth].callbacks = malloc(sizeof(struct AutoFreePool_callback) * poolStack[poolStackDepth].callbackListSize);

    } else if (poolStack[poolStackDepth].numberOfCallbacks >= poolStack[poolStackDepth].callbackListSize) {
        poolStack[poolStackDepth].callbackListSize *= 2;
        poolStack[poolStackDepth].callbacks = realloc(poolStack[poolStackDepth].callbacks, sizeof(struct AutoFreePool_callback) * poolStack[poolStackDepth].callbackListSize);
    }

    poolStack[poolStackDepth].callbacks[poolStack[poolStackDepth].numberOfCallbacks].callback = callback;
    poolStack[poolStackDepth].callbacks[poolStack[poolStackDepth].numberOfCallbacks].context = context;
    poolStack[poolStackDepth].numberOfCallbacks++;
    if (mutex != NULL) {
        unlockMutex(mutex);
    }
}
VPLTHREAD_FN_DECL AsyncServiceClientStateImpl::doRpc(void* arg)
{
    AsyncServiceClientState* clientState = (AsyncServiceClientState*)arg;
    // Lock sendRequestMutex before dequeuing, to ensure that we send the
    // request messages in the same order that they were enqueued.
    lockMutex(&clientState->pImpl->sendRequestMutex);
    AsyncCallState* callState = clientState->pImpl->requests.dequeue(true);
    if (callState == NULL) {
        unlockMutex(&clientState->pImpl->sendRequestMutex);
        // TODO: assert failed!
    } else {
        ProtoRpcClient* tempClient = callState->asyncClient->_acquireClientCallback();
        if (tempClient == NULL) {
            unlockMutex(&clientState->pImpl->sendRequestMutex);
            callState->status.set_status(RpcStatus_Status_INTERNAL_ERROR);
            callState->status.set_errordetail("Failed to acquire ProtoRpcClient");
        } else {
            // Must call this with sendRequestMutex held.
            bool sendSuccess = callState->sendRpcRequest(*tempClient);
            // Request message sent; we can release the lock now.
            unlockMutex(&clientState->pImpl->sendRequestMutex);
            if (sendSuccess) {
                callState->recvRpcResponse(*tempClient);
            }
            callState->asyncClient->_releaseClientCallback(tempClient);
        }
        clientState->pImpl->results.enqueue(callState);
    }
    return VPLTHREAD_RETURN_VALUE;
}
Example #5
void AutoFreePool_empty() {
    if (mutex != NULL) {
        lockMutex(mutex);
    }

    if (poolStackDepth == -1) {
        if (mutex != NULL) {
            unlockMutex(mutex);
        }
        return;
    }

    if (poolStack[poolStackDepth].addresses != NULL) {
        unsigned int addressIndex;

        for (addressIndex = 0; addressIndex < poolStack[poolStackDepth].numberOfAddresses; addressIndex++) {
            free(poolStack[poolStackDepth].addresses[addressIndex]);
        }
        poolStack[poolStackDepth].numberOfAddresses = 0;
    }

    if (poolStack[poolStackDepth].callbacks != NULL) {
        unsigned int callbackIndex;

        for (callbackIndex = 0; callbackIndex < poolStack[poolStackDepth].numberOfCallbacks; callbackIndex++) {
            poolStack[poolStackDepth].callbacks[callbackIndex].callback(poolStack[poolStackDepth].callbacks[callbackIndex].context);
        }
        poolStack[poolStackDepth].numberOfCallbacks = 0;
    }
    if (mutex != NULL) {
        unlockMutex(mutex);
    }
}
Example #6
void * AutoFreePool_add(void * address) {
    if (mutex != NULL) {
        lockMutex(mutex);
    }

    if (poolStackDepth == -1) {
        if (mutex != NULL) {
            unlockMutex(mutex);
        }
        AutoFreePool_push();
        if (mutex != NULL) {
            lockMutex(mutex);
        }
    }

    if (poolStack[poolStackDepth].addresses == NULL) {
        poolStack[poolStackDepth].numberOfAddresses = 0;
        poolStack[poolStackDepth].addressListSize = 1;
        poolStack[poolStackDepth].addresses = malloc(sizeof(void *) * poolStack[poolStackDepth].addressListSize);

    } else if (poolStack[poolStackDepth].numberOfAddresses >= poolStack[poolStackDepth].addressListSize) {
        poolStack[poolStackDepth].addressListSize *= 2;
        poolStack[poolStackDepth].addresses = realloc(poolStack[poolStackDepth].addresses, sizeof(void *) * poolStack[poolStackDepth].addressListSize);
    }

    poolStack[poolStackDepth].addresses[poolStack[poolStackDepth].numberOfAddresses++] = address;
    if (mutex != NULL) {
        unlockMutex(mutex);
    }

    return address;
}
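
A usage sketch tying the three AutoFreePool entry points together, assuming the declarations above are in scope (e.g. via an AutoFreePool header); the callback, file name, and function names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical cleanup callback; the signature matches AutoFreePool_addCallback. */
static void closeLog(void * context) {
    fclose((FILE *) context);
}

static void poolExample(void) {
    AutoFreePool_push();                         /* open a pool frame */

    char * tmp = AutoFreePool_add(malloc(16));   /* freed by AutoFreePool_empty */
    if (tmp != NULL) {
        strcpy(tmp, "scratch");
    }

    FILE * log = fopen("example.log", "w");
    if (log != NULL) {
        AutoFreePool_addCallback(closeLog, log);
    }

    /* frees every added address, then runs every registered callback */
    AutoFreePool_empty();
}
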
bool
ArchMultithreadWindows::waitCondVar(ArchCond cond,
                            ArchMutex mutex, double timeout)
{
    // prepare to wait
    const DWORD winTimeout = (timeout < 0.0) ? INFINITE :
                                static_cast<DWORD>(1000.0 * timeout);

    // make a list of the condition variable events and the cancel event
    // for the current thread.
    HANDLE handles[3];
    handles[0] = cond->m_events[ArchCondImpl::kSignal];
    handles[1] = cond->m_events[ArchCondImpl::kBroadcast];
    handles[2] = getCancelEventForCurrentThread();

    // update waiter count
    lockMutex(cond->m_waitCountMutex);
    ++cond->m_waitCount;
    unlockMutex(cond->m_waitCountMutex);

    // release mutex.  this should be atomic with the wait so that it's
    // impossible for another thread to signal us between the unlock and
    // the wait, which would lead to a lost signal on broadcasts.
    // however, we're using a manual reset event for broadcasts which
    // stays set until we reset it, so we don't lose the broadcast.
    unlockMutex(mutex);

    // wait for a signal or broadcast
    DWORD result = WaitForMultipleObjects(3, handles, FALSE, winTimeout);

    // cancel takes priority
    if (result != WAIT_OBJECT_0 + 2 &&
        WaitForSingleObject(handles[2], 0) == WAIT_OBJECT_0) {
        result = WAIT_OBJECT_0 + 2;
    }

    // update the waiter count and check if we're the last waiter
    lockMutex(cond->m_waitCountMutex);
    --cond->m_waitCount;
    const bool last = (result == WAIT_OBJECT_0 + 1 && cond->m_waitCount == 0);
    unlockMutex(cond->m_waitCountMutex);

    // reset the broadcast event if we're the last waiter
    if (last) {
        ResetEvent(cond->m_events[ArchCondImpl::kBroadcast]);
    }

    // reacquire the mutex
    lockMutex(mutex);

    // cancel thread if necessary
    if (result == WAIT_OBJECT_0 + 2) {
        ARCH->testCancelThread();
    }

    // return success or failure
    return (result == WAIT_OBJECT_0 + 0 ||
            result == WAIT_OBJECT_0 + 1);
}
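
For context, a sketch of the canonical wait loop this primitive supports; the function name, the ready flag, and the ARCH accessor are assumptions, not part of the example above:

void
waitUntilReady(ArchCond cond, ArchMutex mutex, const bool& ready)
{
    ARCH->lockMutex(mutex);
    while (!ready) {
        // waitCondVar releases the mutex while blocked and reacquires it
        // before returning; a negative timeout waits indefinitely
        ARCH->waitCondVar(cond, mutex, -1.0);
    }
    ARCH->unlockMutex(mutex);
}
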
bool
ArchMultithreadPosix::wait(ArchThread target, double timeout)
{
	assert(target != NULL);

	lockMutex(m_threadMutex);

	// find current thread
	ArchThreadImpl* self = findNoRef(pthread_self());

	// ignore wait if trying to wait on ourself
	if (target == self) {
		unlockMutex(m_threadMutex);
		return false;
	}

	// ref the target so it can't go away while we're watching it
	refThread(target);

	unlockMutex(m_threadMutex);

	try {
		// do first test regardless of timeout
		testCancelThreadImpl(self);
		if (isExitedThread(target)) {
			closeThread(target);
			return true;
		}

		// wait and repeat test if there's a timeout
		if (timeout != 0.0) {
			const double start = ARCH->time();
			do {
				// wait a little
				ARCH->sleep(0.05);

				// repeat test
				testCancelThreadImpl(self);
				if (isExitedThread(target)) {
					closeThread(target);
					return true;
				}

				// repeat wait and test until timed out
			} while (timeout < 0.0 || (ARCH->time() - start) <= timeout);
		}

		closeThread(target);
		return false;
	}
	catch (...) {
		closeThread(target);
		throw;
	}
}
Example #9
int queue_get(queue_t*queue, void*data)
{
    lockMutex(&queue->mutex);
    if (queue->writepos == queue->readpos) {
        unlockMutex(&queue->mutex);
        return 0;
    }
    memcpy(data, &queue->data[queue->readpos*queue->size], queue->size);
    queue->readpos++;
    queue->readpos %= queue->nmemb;
    queue->number--;
    unlockMutex(&queue->mutex);
    return 1;
}
PAPIListenerId connectionAddListener( PAPIConnection *	inConnection,
									  const char *		inComponentName,
									  PAPIListener		inListener,
									  void *			inUserData,
									  PAPIStatus *		outStatus )
{
	PAPIListenerId theListenerId	= 0;
	*outStatus			= PAPISuccess;
	if ( lockMutex( inConnection->mListenersMutex ) == 0 )
	{
		theListenerId = addListenerList( &( inConnection->mListeners ),
						inComponentName,
						inListener,
						inUserData );
		*outStatus = theListenerId == 0 ?
							PAPIOutOfMemoryFailure :
							PAPISuccess;
		unlockMutex( inConnection->mListenersMutex );
	}
	else
	{
		*outStatus = PAPIUnknownFailure;
	}
	return theListenerId;
}
Example #11
bool VideoImpl::seekTo(guint64 positionNanoSeconds)
{
  if (!_appsink0 || !_seekEnabled)
  {
    return false;
  }
  else
  {
    lockMutex();

    // Free the current sample and reset.
    _freeCurrentSample();
    _bitsChanged = false;

    // Seek to position.
    bool result = gst_element_seek_simple(
                    _appsink0, GST_FORMAT_TIME,
                    GstSeekFlags( GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE ),
                    positionNanoSeconds);

    unlockMutex();

    return result;
  }
}
int feedbackLibrary(char *name, int preload)
{
	struct section_file_data *sfd;
	int retval = 0;

	if( !kaffe_feedback_file )
		return( 0 );
	lockMutex(kaffe_feedback_file);
	if( !(sfd = findSectionInFile(kaffe_feedback_file,
				      &lib_section, name)) )
	{
		/*
		 * If the section doesn't exist we need to create and add it.
		 * We only set preload here since the user might've changed
		 * the file to specify otherwise.
		 */
		if( (sfd = createFileSection(lib_section.fs_name, name,
					     "preload", preload ?
					     "true" : "false",
					     NULL)) )
		{
			addSectionToFile(kaffe_feedback_file, sfd);
			retval = 1;
		}
	}
	else
		retval = 1;
	unlockMutex(kaffe_feedback_file);
	return( retval );
}
Example #13
int writePipe(pipe_t pipe, char* msg, uint64_t amount){
    uint64_t i;
    lockMutex(pipe->writeMutex);
    while (pipe->bufferSize >= MINPAGE){
        waitCondVar(&pipe->writeCondVar, pipe->writeMutex);
    }
    lockMutex(pipe->mutex);
    for (i = 0; i < amount; i++){
        pipe->buffer[(pipe->initialIndex + pipe->bufferSize) % MINPAGE] = msg[i];
        pipe->bufferSize++;
    }
    signalCondVar(&pipe->readCondVar);
    unlockMutex(pipe->mutex);
    unlockMutex(pipe->writeMutex);
    return 1;
}
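
A hypothetical caller for the sketch above; writePipe blocks on writeCondVar while the buffer is full and returns 1 once every byte has been copied in:

static int sendGreeting(pipe_t p) {
    char msg[] = "hello";
    /* copies sizeof(msg) bytes (including the NUL) into the ring buffer
       and wakes any blocked reader via readCondVar */
    return writePipe(p, msg, sizeof(msg));
}
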
ArchThread
ArchMultithreadWindows::newThread(ThreadFunc func, void* data)
{
    lockMutex(m_threadMutex);

    // create thread impl for new thread
    ArchThreadImpl* thread = new ArchThreadImpl;
    thread->m_func          = func;
    thread->m_userData      = data;

    // create thread
    unsigned int id = 0;
    thread->m_thread = reinterpret_cast<HANDLE>(_beginthreadex(NULL, 0,
                                threadFunc, (void*)thread, 0, &id));
    thread->m_id     = static_cast<DWORD>(id);

    // check if thread was started
    if (thread->m_thread == 0) {
        // failed to start thread so clean up
        delete thread;
        thread = NULL;
    }
    else {
        // add thread to list
        insert(thread);

        // increment ref count to account for the thread itself
        refThread(thread);
    }

    // note that the child thread will wait until we release this mutex
    unlockMutex(m_threadMutex);

    return thread;
}
AsyncCallState* MyQueue::dequeue(bool blockIfEmpty)
{
    int sem_rv;
    if (blockIfEmpty) {
        sem_rv = VPLSem_Wait(&queueSem);
    } else {
        sem_rv = VPLSem_TryWait(&queueSem);
    }
    if (sem_rv == 0) {
        AsyncCallState* result;
        lockMutex(&queueMutex);
        if (head == NULL) {
            result = NULL;
        } else {
            result = head;
            head = result->nextInQueue;
            if (head == NULL) {
                tail = NULL;
            }
            result->nextInQueue = NULL;
        }
        unlockMutex(&queueMutex);
        return result;
    } else {
        return NULL;
    }
}
Example #16
int closePipe(int pipe)
{
    Spipe *p = checkSpipeData(pipe);
    if(!p)
        return -1;

    int pid = getpid();

    enterProcessCriticalCode(pid);
    // lock the mutex to block I/O operations on the pipe
    lockMutex(&p->locker);
    free(p->memory);
    p->memory = 0;

    destroyWaitCond(p->ready_read_wid);
    destroyWaitCond(p->ready_write_wid);
    destroyWaitCond(p->fully_flushed_wid);
    destroyWaitCond(p->fully_empty_wid);
    unlockMutex(&p->locker);
    destroyMutex(&p->locker);

    freeSpipeData(p->id);
    leaveProcessCriticalCode(pid);

    return 0;
}
void *FastGaussianBlurValueOperation::initializeTileData(rcti *rect)
{
	lockMutex();
	if (!this->m_iirgaus) {
		MemoryBuffer *newBuf = (MemoryBuffer *)this->m_inputprogram->initializeTileData(rect);
		MemoryBuffer *copy = newBuf->duplicate();
		FastGaussianBlurOperation::IIR_gauss(copy, this->m_sigma, 0, 3);

		if (this->m_overlay == FAST_GAUSS_OVERLAY_MIN) {
			float *src = newBuf->getBuffer();
			float *dst = copy->getBuffer();
			for (int i = copy->getWidth() * copy->getHeight(); i != 0; i--, src += COM_NUM_CHANNELS_VALUE, dst += COM_NUM_CHANNELS_VALUE) {
				if (*src < *dst) {
					*dst = *src;
				}
			}
		}
		else if (this->m_overlay == FAST_GAUSS_OVERLAY_MAX) {
			float *src = newBuf->getBuffer();
			float *dst = copy->getBuffer();
			for (int i = copy->getWidth() * copy->getHeight(); i != 0; i--, src += COM_NUM_CHANNELS_VALUE, dst += COM_NUM_CHANNELS_VALUE) {
				if (*src > *dst) {
					*dst = *src;
				}
			}
		}

		this->m_iirgaus = copy;
	}
	unlockMutex();
	return this->m_iirgaus;
}
void *FastGaussianBlurOperation::initializeTileData(rcti *rect)
{
	lockMutex();
	if (!this->m_iirgaus) {
		MemoryBuffer *newBuf = (MemoryBuffer *)this->m_inputProgram->initializeTileData(rect);
		MemoryBuffer *copy = newBuf->duplicate();
		updateSize();

		int c;
		this->m_sx = this->m_data.sizex * this->m_size / 2.0f;
		this->m_sy = this->m_data.sizey * this->m_size / 2.0f;

		if ((this->m_sx == this->m_sy) && (this->m_sx > 0.0f)) {
			for (c = 0; c < COM_NUM_CHANNELS_COLOR; ++c)
				IIR_gauss(copy, this->m_sx, c, 3);
		}
		else {
			if (this->m_sx > 0.0f) {
				for (c = 0; c < COM_NUM_CHANNELS_COLOR; ++c)
					IIR_gauss(copy, this->m_sx, c, 1);
			}
			if (this->m_sy > 0.0f) {
				for (c = 0; c < COM_NUM_CHANNELS_COLOR; ++c)
					IIR_gauss(copy, this->m_sy, c, 2);
			}
		}
		this->m_iirgaus = copy;
	}
	unlockMutex();
	return this->m_iirgaus;
}
void SQLMXLoggingArea::logPOSErrorEvent(const Lng32 errorCode,
                                        const char *msg1,
                                        const char *msg2,
                                        const char *msg3)
{
  bool lockedMutex = lockMutex();
  SqlSealogEvent sevent;
  // open a new connection
  sevent.openConnection();
  // set the required parameters
  sevent.setError1(errorCode);
  sevent.setString0((char *)msg1);
  sevent.setString1((char *)msg2);
  sevent.setString2((char *)msg3);
 
  // set the event id and severity and send the event
  if (errorCode == 1150)
    sevent.sendEvent(SQEV_SQL_POS_ERROR, SQ_LOG_ERR);
  else if (errorCode == 1154)
    sevent.sendEvent(SQEV_SQL_POS_CREATE_ERROR, SQ_LOG_ERR);
  // close the connection.
  sevent.closeConnection();
  if (lockedMutex)
    unlockMutex();
}
void *PlaneCornerPinWarpImageOperation::initializeTileData(rcti *rect)
{
  void *data = PlaneDistortWarpImageOperation::initializeTileData(rect);

  /* get corner values once, by reading inputs at (0,0)
   * XXX this assumes invariable values (no image inputs),
   * we don't have a nice generic system for that yet
   */
  lockMutex();
  if (!m_corners_ready) {
    /* corner sockets start at index 1 */
    SocketReader *readers[4] = {
        getInputSocketReader(1),
        getInputSocketReader(2),
        getInputSocketReader(3),
        getInputSocketReader(4),
    };
    float corners[4][2];
    readCornersFromSockets(rect, readers, corners);
    calculateCorners(corners, true, 0);

    m_corners_ready = true;
  }
  unlockMutex();

  return data;
}
Example #21
int queue_put(queue_t*queue, void*data)
{
    lockMutex(&queue->mutex);
    /* compute the next write position under the lock */
    int tmp = queue->writepos + 1;
    tmp %= queue->nmemb;
    if (tmp == queue->readpos) {
        unlockMutex(&queue->mutex);
        return 0;
    }
    memcpy(&queue->data[queue->writepos*queue->size], data, queue->size);
    queue->writepos = tmp;
    queue->number++;
    unlockMutex(&queue->mutex);
    return 1;
}
Example #22
void queue_flush(queue_t*queue)
{
    lockMutex(&queue->mutex);
    queue->number = 0;
    queue->readpos = 0;
    queue->writepos = 0;
    unlockMutex(&queue->mutex);
}
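
A round-trip through the ring buffer, assuming a queue_t initialized elsewhere with nmemb slots of size sizeof(msg_t); the message type and function name are illustrative:

typedef struct {
    int id;
    double payload;
} msg_t;

static void roundTrip(queue_t * queue) {
    msg_t out = { 42, 3.14 };
    msg_t in;

    if (queue_put(queue, &out)) {    /* returns 0 when the queue is full */
        if (queue_get(queue, &in)) { /* returns 0 when the queue is empty */
            /* `in` now holds a byte-for-byte copy of `out` */
        }
    }
    queue_flush(queue);              /* discard anything still queued */
}
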
void*
ArchMultithreadWindows::getResultOfThread(ArchThread thread)
{
    lockMutex(m_threadMutex);
    void* result = thread->m_result;
    unlockMutex(m_threadMutex);
    return result;
}
bool
ArchMultithreadPosix::isExitedThread(ArchThread thread)
{
	lockMutex(m_threadMutex);
	bool exited = thread->m_exited;
	unlockMutex(m_threadMutex);
	return exited;
}
HANDLE
ArchMultithreadWindows::getCancelEventForCurrentThread()
{
    lockMutex(m_threadMutex);
    ArchThreadImpl* thread = findNoRef(GetCurrentThreadId());
    unlockMutex(m_threadMutex);
    return thread->m_cancel;
}
void*
ArchMultithreadWindows::getNetworkDataForThread(ArchThread thread)
{
    lockMutex(m_threadMutex);
    void* data = thread->m_networkData;
    unlockMutex(m_threadMutex);
    return data;
}
void*
CArchMultithreadPosix::getNetworkDataForThread(CArchThread thread)
{
	lockMutex(m_threadMutex);
	void* data = thread->m_networkData;
	unlockMutex(m_threadMutex);
	return data;
}
void*
CArchMultithreadPosix::getResultOfThread(CArchThread thread)
{
	lockMutex(m_threadMutex);
	void* result = thread->m_result;
	unlockMutex(m_threadMutex);
	return result;
}
void
ArchMultithreadWindows::setNetworkDataForCurrentThread(void* data)
{
    lockMutex(m_threadMutex);
    ArchThreadImpl* thread = findNoRef(GetCurrentThreadId());
    thread->m_networkData = data;
    unlockMutex(m_threadMutex);
}
void
ArchMultithreadPosix::setNetworkDataForCurrentThread(void* data)
{
	lockMutex(m_threadMutex);
	ArchThreadImpl* thread = find(pthread_self());
	thread->m_networkData = data;
	unlockMutex(m_threadMutex);
}