ServiceRef::~ServiceRef() {
    // First, dispose the service ref. This cancels all asynchronous operations.
    if (ref_) {
        DNSServiceRefDeallocate(ref_);
    }

    // Then release the js objects.
    if ( ! callback_.IsEmpty()) {
        NanDisposePersistent(callback_);
    }
    if ( ! context_.IsEmpty()) {
        NanDisposePersistent(context_);
    }
}
void PersistentDisposeContext::CallDisposeOnV8Thread() {
    DBG("PersistentDisposeContext::CallDisposeOnV8Thread");
    Persistent<Value>* handle = (Persistent<Value>*)ptr.ToPointer();
    NanDisposePersistent(*handle);
    delete handle;
}
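// A minimal sketch (hypothetical names) of the producing side that pairs with
// CallDisposeOnV8Thread above: the Persistent is heap-allocated on the V8 thread,
// only its raw address crosses thread boundaries, and the function above later turns
// that address back into a Persistent*, disposes it, and frees it. The
// PersistentDisposeContext constructor shown here is assumed, not taken from the source.
PersistentDisposeContext* MakeDisposeContext(v8::Handle<v8::Value> value) {
    Persistent<Value>* handle = new Persistent<Value>();
    NanAssignPersistent(*handle, value);          // take a strong reference while still on the V8 thread
    return new PersistentDisposeContext(handle);  // assumed to stash the pointer later read via ptr.ToPointer()
}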
~GetDataOperation() {
    EOS_DEBUG_METHOD();

    if (!bufferHandle_.IsEmpty()) {
        EOS_DEBUG(L"Warning! Buffer handle not released by GetDataOperation. (Memory held onto for longer than needed.)\n");
        NanDisposePersistent(bufferHandle_);
    }
}
void ServiceRef::SetCallback(v8::Handle<v8::Function> callback) {
    if ( ! callback_.IsEmpty()) {
        NanDisposePersistent(callback_);
    }
    NanAssignPersistent(callback_, callback);
}
void ServiceRef::SetContext(v8::Handle<v8::Value> context) {
    if ( ! context_.IsEmpty()) {
        NanDisposePersistent(context_);
    }
    NanAssignPersistent(context_, context);
}
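// A minimal sketch (NAN 1.x-style method with a hypothetical name) of how the two setters
// above might be driven from JS. The setters themselves dispose any previously assigned
// persistent, so the caller only validates and forwards the arguments.
NAN_METHOD(ServiceRef::SetCallbackAndContext) {
    NanScope();
    ServiceRef* self = node::ObjectWrap::Unwrap<ServiceRef>(args.Holder());
    if (args.Length() >= 1 && args[0]->IsFunction()) {
        self->SetCallback(args[0].As<v8::Function>());
    }
    if (args.Length() >= 2) {
        self->SetContext(args[1]);
    }
    NanReturnUndefined();
}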
void EIO_AfterClose(uv_work_t* req) {
    NanScope();
    CloseBaton* data = static_cast<CloseBaton*>(req->data);

    v8::Handle<v8::Value> argv[1];
    if (data->errorString[0]) {
        argv[0] = v8::Exception::Error(NanNew<v8::String>(data->errorString));
    } else {
        argv[0] = NanUndefined();

        // We don't have an error, so clean up the write queue for that fd.
        _WriteQueue *q = qForFD(data->fd);
        if (q) {
            q->lock();
            QueuedWrite &write_queue = q->get();
            while (!write_queue.empty()) {
                QueuedWrite *del_q = write_queue.next;
                NanDisposePersistent(del_q->baton->buffer);
                del_q->remove();
            }
            q->unlock();
            deleteQForFD(data->fd);
        }
    }
    data->callback->Call(1, argv);

    delete data->callback;
    delete data;
    delete req;
}
void Grid::EIO_AfterEncode(uv_work_t* req) {
    NanScope();

    encode_grid_baton_t *closure = static_cast<encode_grid_baton_t *>(req->data);

    if (closure->error) {
        // There is no known way for the preceding processing to throw an error,
        // so this branch is excluded from coverage.
        /* LCOV_EXCL_START */
        Local<Value> argv[1] = { NanError(closure->error_name.c_str()) };
        NanMakeCallback(NanGetCurrentContext()->Global(), NanNew(closure->cb), 1, argv);
        /* LCOV_EXCL_STOP */
    } else {
        // Convert key order to a proper javascript array.
        Local<Array> keys_a = NanNew<Array>(closure->key_order.size());
        std::vector<std::string>::iterator it;
        unsigned int i;
        for (it = closure->key_order.begin(), i = 0; it < closure->key_order.end(); ++it, ++i) {
            keys_a->Set(i, NanNew((*it).c_str()));
        }

        mapnik::grid const& grid_type = *closure->g->get();

        // Gather feature data.
        Local<Object> feature_data = NanNew<Object>();
        if (closure->add_features) {
            node_mapnik::write_features<mapnik::grid>(grid_type, feature_data, closure->key_order);
        }

        // Create the return hash.
        Local<Object> json = NanNew<Object>();
        Local<Array> grid_array = NanNew<Array>(closure->lines.size());
        unsigned array_size = std::ceil(grid_type.width() / static_cast<float>(closure->resolution));
        for (unsigned j = 0; j < closure->lines.size(); ++j) {
            node_mapnik::grid_line_type const & line = closure->lines[j];
            grid_array->Set(j, NanNew<String>(line.get(), array_size));
        }
        json->Set(NanNew("grid"), grid_array);
        json->Set(NanNew("keys"), keys_a);
        json->Set(NanNew("data"), feature_data);

        Local<Value> argv[2] = { NanNull(), NanNew(json) };
        NanMakeCallback(NanGetCurrentContext()->Global(), NanNew(closure->cb), 2, argv);
    }

    closure->g->Unref();
    NanDisposePersistent(closure->cb);
    delete closure;
}
// Any new return statements added here should call MakeWeak() on bufferHandle_,
// if it is not empty.
void CallbackOverride(SQLRETURN ret) {
    EOS_DEBUG_METHOD();

    if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) {
        if (!bufferHandle_.IsEmpty())
            NanDisposePersistent(bufferHandle_);
        return CallbackErrorOverride(ret);
    }

    EOS_DEBUG(L"Final Result: %hi\n", ret);

    Handle<Value> argv[4];
    argv[0] = NanUndefined();

    if (totalLength_ != SQL_NO_TOTAL)
        argv[2] = NanNew<Number>(totalLength_);
    else
        argv[2] = NanUndefined();

    argv[3] = NanNew<Boolean>(totalLength_ > bufferLength_
        || (totalLength_ == SQL_NO_TOTAL && ret == SQL_SUCCESS_WITH_INFO));

    if (ret == SQL_NO_DATA)
        argv[1] = NanUndefined();
    else if (totalLength_ == SQL_NULL_DATA)
        argv[1] = NanNull();
    else if (raw_) {
        assert(!bufferHandle_.IsEmpty());
        argv[1] = NanNew(bufferHandle_);
    } else if (cType_ == SQL_C_BINARY) {
        if (totalLength_ >= bufferLength_)
            argv[1] = NanNew(bufferHandle_);
        else
            argv[1] = JSBuffer::Slice(NanNew(bufferHandle_), 0, totalLength_);
    } else {
        argv[1] = Eos::ConvertToJS(buffer_, totalLength_, bufferLength_, cType_);
        if (argv[1]->IsUndefined())
            argv[0] = OdbcError("Unable to interpret contents of result buffer");
    }

    // Can we Dispose() things that JS-land is referencing? We'll soon find out!
    if (!bufferHandle_.IsEmpty())
        NanDisposePersistent(bufferHandle_);

    MakeCallback(argv);
}
void ClrFuncInvokeContext::DisposeCallback() {
    if (this->callback) {
        DBG("ClrFuncInvokeContext::DisposeCallback");
        NanDisposePersistent(*(this->callback));
        delete this->callback;
        this->callback = NULL;
    }
}
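// A minimal sketch (hypothetical helper, assumed member types) of the allocation that
// DisposeCallback above undoes: the callback persistent is heap-allocated so it can outlive
// the invoking handle scope, then disposed and freed exactly once on the V8 thread.
void ClrFuncInvokeContext::SetCallback(v8::Handle<v8::Function> callbackHandle) {
    this->callback = new Persistent<Function>();
    NanAssignPersistent(*(this->callback), callbackHandle);
}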
void EIO_AfterWrite(uv_work_t* req) {
    NanScope();
    QueuedWrite* queuedWrite = static_cast<QueuedWrite*>(req->data);
    WriteBaton* data = static_cast<WriteBaton*>(queuedWrite->baton);

    v8::Handle<v8::Value> argv[2];
    if (data->errorString[0]) {
        argv[0] = v8::Exception::Error(NanNew<v8::String>(data->errorString));
        argv[1] = NanUndefined();
    } else {
        argv[0] = NanUndefined();
        argv[1] = NanNew<v8::Int32>(data->result);
    }
    data->callback->Call(2, argv);

    if (data->offset < data->bufferLength && !data->errorString[0]) {
        // We're not done with this baton, so throw it right back onto the queue.
        // Don't re-queue the write if there was an error, because the same error could occur again.
        // TODO: Add a uv_poll here for unix...
        //fprintf(stderr, "Write again...\n");
        uv_queue_work(uv_default_loop(), req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
        return;
    }

    int fd = data->fd;
    _WriteQueue *q = qForFD(fd);
    if (!q) {
        NanThrowTypeError("There's no write queue for that file descriptor (after write)!");
        return;
    }

    q->lock();
    QueuedWrite &write_queue = q->get();

    // Remove this one from the list.
    queuedWrite->remove();

    // If there are any left, start a new thread to write the next one.
    if (!write_queue.empty()) {
        // Always pull the next work item from the head of the queue.
        QueuedWrite* nextQueuedWrite = write_queue.next;
        uv_queue_work(uv_default_loop(), &nextQueuedWrite->req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
    }
    q->unlock();

    NanDisposePersistent(data->buffer);
    delete data->callback;
    delete data;
    delete queuedWrite;
}
/*
  DESCRIPTION
    Destructor for the Oracledb class.
*/
Oracledb::~Oracledb() {
    if ( fetchAsStringTypes_ ) {
        free ( fetchAsStringTypes_ );
        fetchAsStringTypes_ = NULL;
        fetchAsStringTypesCount_ = 0;
    }

    if (this->dpienv_) {
        dpienv_->terminate();
    }

    NanDisposePersistent(jsOracledb);
}
void NodeFileSource::notify(mbgl::Request *req, const std::shared_ptr<const mbgl::Response>& response) {
    // First, remove the request, since it might be destructed at any point now.
    auto it = pending.find(req);
    if (it != pending.end()) {
#if (NODE_MODULE_VERSION <= NODE_0_10_MODULE_VERSION)
        NanDisposePersistent(it->second);
#endif
        pending.erase(it);

        // Make sure the loop can exit when there are no pending requests.
        if (pending.empty()) {
            queue->unref();
        }
    }

    req->notify(response);
}
void Grid::EIO_AfterClear(uv_work_t* req) {
    NanScope();
    clear_grid_baton_t *closure = static_cast<clear_grid_baton_t *>(req->data);

    if (closure->error) {
        Local<Value> argv[1] = { NanError(closure->error_name.c_str()) };
        NanMakeCallback(NanGetCurrentContext()->Global(), NanNew(closure->cb), 1, argv);
    } else {
        Local<Value> argv[1] = { NanNull() };
        NanMakeCallback(NanGetCurrentContext()->Global(), NanNew(closure->cb), 1, argv);
    }

    closure->g->Unref();
    NanDisposePersistent(closure->cb);
    delete closure;
}
void EIO_AfterWrite(uv_work_t* req) {
    NanScope();
    QueuedWrite* queuedWrite = static_cast<QueuedWrite*>(req->data);
    WriteBaton* data = static_cast<WriteBaton*>(queuedWrite->baton);

    v8::Handle<v8::Value> argv[2];
    if (data->errorString[0]) {
        argv[0] = v8::Exception::Error(NanNew<v8::String>(data->errorString));
        argv[1] = NanUndefined();
    } else {
        argv[0] = NanUndefined();
        argv[1] = NanNew<v8::Int32>(data->result);
    }
    data->callback->Call(2, argv);

    if (data->offset < data->bufferLength && !data->errorString[0]) {
        // We're not done with this baton, so throw it right back onto the queue.
        // Don't re-queue the write if there was an error, because the same error could occur again.
        // TODO: Add a uv_poll here for unix...
        uv_queue_work(uv_default_loop(), req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
        return;
    }

    uv_mutex_lock(&write_queue_mutex);
    QUEUE_REMOVE(&queuedWrite->queue);

    if (!QUEUE_EMPTY(&write_queue)) {
        // Always pull the next work item from the head of the queue.
        QUEUE* head = QUEUE_HEAD(&write_queue);
        QueuedWrite* nextQueuedWrite = QUEUE_DATA(head, QueuedWrite, queue);
        uv_queue_work(uv_default_loop(), &nextQueuedWrite->req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
    }
    uv_mutex_unlock(&write_queue_mutex);

    NanDisposePersistent(data->buffer);
    delete data->callback;
    delete data;
    delete queuedWrite;
}
void NodeFileSource::processCancel(mbgl::Request *req) {
    NanScope();

    auto it = pending.find(req);
    if (it == pending.end()) {
        // The response callback was already fired. There is no point in calling the cancellation
        // callback because the request is already completed.
    } else {
#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION)
        auto requestHandle = v8::Local<v8::Object>::New(v8::Isolate::GetCurrent(), it->second);
#else
        auto requestHandle = NanNew<v8::Object>(it->second);
#endif

        // Dispose and remove the persistent handle.
#if (NODE_MODULE_VERSION <= NODE_0_10_MODULE_VERSION)
        NanDisposePersistent(it->second);
#endif
        pending.erase(it);

        // Make sure the loop can exit when there are no pending requests.
        if (pending.empty()) {
            queue->unref();
        }

        auto handle = NanObjectWrapHandle(this);
        if (handle->Has(NanNew("cancel"))) {
            v8::Local<v8::Value> argv[] = { requestHandle };
            NanMakeCallback(handle, NanNew("cancel"), 1, argv);
        }

        // Set the request handle in the request wrapper handle to null.
        ObjectWrap::Unwrap<NodeRequest>(requestHandle)->cancel();
    }

    // Finally, destruct the request object.
    req->destruct();
}
CoreClrFuncInvokeContext::~CoreClrFuncInvokeContext() {
    DBG("CoreClrFuncInvokeContext::~CoreClrFuncInvokeContext");

    if (this->callback) {
        NanDisposePersistent(*(this->callback));
        delete this->callback;
        this->callback = NULL;
    }

    if (this->task) {
        CoreClrEmbedding::FreeHandle(this->task);
        this->task = NULL;
    }

    if (this->resultData) {
        CoreClrEmbedding::FreeMarshalData(this->resultData, this->resultType);
        this->resultData = NULL;
    }
}
void Grid::EIO_AfterClear(uv_work_t* req) {
    NanScope();
    clear_grid_baton_t *closure = static_cast<clear_grid_baton_t *>(req->data);

    if (closure->error) {
        // There appears to be no way for the preceding processing to throw, so an error
        // cannot occur here; this branch is excluded from code coverage.
        /* LCOV_EXCL_START */
        Local<Value> argv[1] = { NanError(closure->error_name.c_str()) };
        NanMakeCallback(NanGetCurrentContext()->Global(), NanNew(closure->cb), 1, argv);
        /* LCOV_EXCL_STOP */
    } else {
        Local<Value> argv[2] = { NanNull(), NanObjectWrapHandle(closure->g) };
        NanMakeCallback(NanGetCurrentContext()->Global(), NanNew(closure->cb), 2, argv);
    }

    closure->g->Unref();
    NanDisposePersistent(closure->cb);
    delete closure;
}
NodejsFunc::~NodejsFunc() {
    DBG("NodejsFunc::~NodejsFunc");
    NanDisposePersistent(*(this->Func));
    delete this->Func;
}
virtual ~BufferImageSource() {
    NanDisposePersistent(mImageBuffer);
}
void NodeFileSource::processCancel(const mbgl::Resource& resource) {
    NanScope();

    auto it = pending.find(resource);
    if (it == pending.end()) {
        // The response callback was already fired. There is no point in calling the cancellation
        // callback because the request is already completed.
    } else {
#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION)
        auto requestHandle = v8::Local<v8::Object>::New(v8::Isolate::GetCurrent(), it->second);
        it->second.Reset();
#else
        auto requestHandle = NanNew<v8::Object>(it->second);
        NanDisposePersistent(it->second);
#endif
        pending.erase(it);

        // Make sure the loop can exit when there are no pending requests.
        if (pending.empty()) {
            queue->unref();
        }

#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION)
        auto optionsObject = v8::Local<v8::Object>::New(v8::Isolate::GetCurrent(), options);
        if (optionsObject->Has(NanNew("cancel"))) {
            auto cancelFunction = optionsObject->Get(NanNew("cancel")).As<v8::Function>();
#else
        if (options->Has(NanNew("cancel"))) {
            auto cancelFunction = options->Get(NanNew("cancel")).As<v8::Function>();
#endif
            v8::Local<v8::Value> argv[] = { requestHandle };
            NanMakeCallback(NanGetCurrentContext()->Global(), cancelFunction, 1, argv);
        }

        // Set the request handle in the request wrapper handle to null.
        node::ObjectWrap::Unwrap<NodeRequest>(requestHandle)->cancel();
    }
}

void NodeFileSource::notify(const mbgl::Resource& resource, const std::shared_ptr<const mbgl::Response>& response) {
    // First, remove the request, since it might be destructed at any point now.
    auto it = pending.find(resource);
    if (it != pending.end()) {
#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION)
        it->second.Reset();
#else
        NanDisposePersistent(it->second);
#endif
        pending.erase(it);

        // Make sure the loop can exit when there are no pending requests.
        if (pending.empty()) {
            queue->unref();
        }
    }

    std::lock_guard<std::mutex> lock(observersMutex);

    auto observersIt = observers.find(resource);
    if (observersIt == observers.end()) {
        return;
    }

    observersIt->second->notify(response);
    observers.erase(observersIt);
}
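// A minimal sketch (hypothetical helper) that folds the repeated NODE_MODULE_VERSION split
// above into one place. PersistentType stands for whatever persistent type the `pending`
// map stores; only one branch is compiled for any given Node version.
template <class PersistentType>
inline void ReleasePending(PersistentType& handle) {
#if (NODE_MODULE_VERSION > NODE_0_10_MODULE_VERSION)
    handle.Reset();                // post-0.10 V8: Reset() releases the handle
#else
    NanDisposePersistent(handle);  // Node 0.10: use the NAN helper
#endif
}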
ConnectBaton::~ConnectBaton() {
    NanDisposePersistent(callback);

    if (error) {
        delete error;
    }
}
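// A minimal sketch (hypothetical constructor signature) of the assignment that the
// destructor above undoes: the callback is pinned with NanAssignPersistent so that
// NanDisposePersistent in ~ConnectBaton releases exactly what was assigned here.
ConnectBaton::ConnectBaton(v8::Handle<v8::Function> callbackHandle) : error(NULL) {
    NanAssignPersistent(callback, callbackHandle);
}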