diff --git a/binding.gyp b/binding.gyp
index f5b7e40e..373cb766 100644
--- a/binding.gyp
+++ b/binding.gyp
@@ -8,6 +8,15 @@
   "targets": [
     {
       "target_name": "confluent-kafka-javascript",
+      "cflags!": [ "-fno-exceptions" ],
+      "cflags_cc!": [ "-fno-exceptions" ],
+      "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES",
+        "CLANG_CXX_LIBRARY": "libc++",
+        "MACOSX_DEPLOYMENT_TARGET": "10.7",
+      },
+      "msvs_settings": {
+        "VCCLCompilerTool": { "ExceptionHandling": 1 },
+      },
       'sources': [
         'src/binding.cc',
         'src/callbacks.cc',
@@ -22,7 +31,7 @@
         'src/admin.cc'
       ],
       "include_dirs": [
-        "<!(node -e \"require('nan')\")",
+        "<!(node -p \"require('node-addon-api').include_dir\")",
         "deps/librdkafka/src",
         "deps/librdkafka/src-cpp"
       ],
diff --git a/package.json b/package.json
--- a/package.json
+++ b/package.json
     "node": ">=18.0.0"
@@ -65,4 +65,4 @@
     "schemaregistry",
     "schemaregistry-examples"
   ]
-}
\ No newline at end of file
+}
diff --git a/schemaregistry/package.json b/schemaregistry/package.json
index 4a0c8f16..1d5b1b15 100644
--- a/schemaregistry/package.json
+++ b/schemaregistry/package.json
@@ -30,6 +30,7 @@
     "uuid": "^10.0.0"
   },
   "dependencies": {
+    "node-addon-api": "8.3.1",
     "@aws-sdk/client-kms": "^3.637.0",
     "@aws-sdk/credential-providers": "^3.637.0",
     "@azure/identity": "^4.4.1",
diff --git a/src/admin.cc b/src/admin.cc
index 568cf710..80bf903d 100644
--- a/src/admin.cc
+++ b/src/admin.cc
@@ -16,7 +16,7 @@
 
 #include "src/workers.h"
 
-using Nan::FunctionCallbackInfo;
+using Napi::CallbackInfo;
 
 namespace NodeKafka {
 
@@ -30,10 +30,6 @@ namespace NodeKafka {
 * @sa NodeKafka::Client
 */
 
-AdminClient::AdminClient(Conf *gconfig) : Connection(gconfig, NULL) {}
-
-AdminClient::AdminClient(Connection *connection) : Connection(connection) {}
-
 AdminClient::~AdminClient() {
   Disconnect();
 }
@@ -47,8 +43,8 @@ Baton AdminClient::Connect() {
    * client, as it should always be connected. */
   if (m_has_underlying) {
     return Baton(RdKafka::ERR__STATE,
-      "Existing client is not connected, and dependent client "
-      "cannot initiate connection.");
+                 "Existing client is not connected, and dependent client "
+                 "cannot initiate connection.");
   }
 
   Baton baton = setupSaslOAuthBearerConfig();
@@ -98,108 +94,83 @@ Baton AdminClient::Disconnect() {
   return Baton(RdKafka::ERR_NO_ERROR);
 }
 
-Nan::Persistent<v8::Function> AdminClient::constructor;
-
-void AdminClient::Init(v8::Local<v8::Object> exports) {
-  Nan::HandleScope scope;
-
-  v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);
-  tpl->SetClassName(Nan::New("AdminClient").ToLocalChecked());
-  tpl->InstanceTemplate()->SetInternalFieldCount(1);
-
-  // Inherited from NodeKafka::Connection
-  Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks);
-  Nan::SetPrototypeMethod(tpl, "name", NodeName);
-
-  // Admin client operations
-  Nan::SetPrototypeMethod(tpl, "createTopic", NodeCreateTopic);
-  Nan::SetPrototypeMethod(tpl, "deleteTopic", NodeDeleteTopic);
-  Nan::SetPrototypeMethod(tpl, "createPartitions", NodeCreatePartitions);
-  Nan::SetPrototypeMethod(tpl, "deleteRecords", NodeDeleteRecords);
-  Nan::SetPrototypeMethod(tpl, "describeTopics", NodeDescribeTopics);
-  Nan::SetPrototypeMethod(tpl, "listOffsets", NodeListOffsets);
-
-  // Consumer group related operations
-  Nan::SetPrototypeMethod(tpl, "listGroups", NodeListGroups);
-  Nan::SetPrototypeMethod(tpl, "describeGroups", NodeDescribeGroups);
-  Nan::SetPrototypeMethod(tpl, "deleteGroups", NodeDeleteGroups);
-  Nan::SetPrototypeMethod(tpl, "listConsumerGroupOffsets",
-                          NodeListConsumerGroupOffsets);
-
-  Nan::SetPrototypeMethod(tpl, "connect", NodeConnect);
-  Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect);
-  Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials);
-  Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata);
-  Nan::SetPrototypeMethod(tpl,
"setOAuthBearerToken", NodeSetOAuthBearerToken); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - NodeSetOAuthBearerTokenFailure); - - constructor.Reset( - (tpl->GetFunction(Nan::GetCurrentContext())).ToLocalChecked()); - Nan::Set(exports, Nan::New("AdminClient").ToLocalChecked(), - tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked()); +Napi::FunctionReference AdminClient::constructor; + +void AdminClient::Init(const Napi::Env& env, Napi::Object exports) { + Napi::HandleScope scope(env); + + Napi::Function AdminClient = DefineClass(env, "AdminClient", { + // Inherited from NodeKafka::Connection + InstanceMethod("configureCallbacks", &AdminClient::NodeConfigureCallbacks), + InstanceMethod("name", &AdminClient::NodeName), + InstanceMethod("setOAuthBearerToken", &AdminClient::NodeSetOAuthBearerToken), + StaticMethod("setOAuthBearerTokenFailure", + &NodeSetOAuthBearerTokenFailure), + + // Admin client operations + InstanceMethod("createTopic", &AdminClient::NodeCreateTopic), + InstanceMethod("deleteTopic", &AdminClient::NodeDeleteTopic), + InstanceMethod("createPartitions", &AdminClient::NodeCreatePartitions), + InstanceMethod("deleteRecords", &AdminClient::NodeDeleteRecords), + InstanceMethod("describeTopics", &AdminClient::NodeDescribeTopics), + InstanceMethod("listOffsets", &AdminClient::NodeListOffsets), + + // Consumer group related operations + InstanceMethod("listGroups", &AdminClient::NodeListGroups), + InstanceMethod("describeGroups", &AdminClient::NodeDescribeGroups), + InstanceMethod("deleteGroups", &AdminClient::NodeDeleteGroups), + InstanceMethod("listConsumerGroupOffsets",&AdminClient::NodeListConsumerGroupOffsets), + InstanceMethod("connect", &AdminClient::NodeConnect), + InstanceMethod("disconnect", &AdminClient::NodeDisconnect), + InstanceMethod("setSaslCredentials", &AdminClient::NodeSetSaslCredentials), + InstanceMethod("getMetadata", &AdminClient::NodeGetMetadata), + }); + + constructor.Reset(AdminClient); + exports.Set(Napi::String::New(env, "AdminClient"), AdminClient); } -void AdminClient::New(const Nan::FunctionCallbackInfo& info) { +AdminClient::AdminClient(const Napi::CallbackInfo& info): Connection(info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return; } if (info.Length() < 1) { - return Nan::ThrowError("You must supply a global configuration or a preexisting client"); // NOLINT + Napi::Error::New(env, "You must supply a global configuration or a preexisting client").ThrowAsJavaScriptException(); + return; } Connection *connection = NULL; Conf *gconfig = NULL; AdminClient *client = NULL; - if (info.Length() >= 3 && !info[2]->IsNull() && !info[2]->IsUndefined()) { - if (!info[2]->IsObject()) { - return Nan::ThrowError("Third argument, if provided, must be a client object"); // NOLINT + if (info.Length() >= 3 && !info[2].IsNull() && !info[2].IsUndefined()) { + if (!info[2].IsObject()) { + Napi::Error::New(env, "Third argument, if provided, must be a client object").ThrowAsJavaScriptException(); + return; } // We check whether this is a wrapped object within the calling JavaScript // code, so it's safe to unwrap it here. We Unwrap it directly into a // Connection object, since it's OK to unwrap into the parent class. 
-    connection = ObjectWrap::Unwrap<Connection>(
-      info[2]->ToObject(Nan::GetCurrentContext()).ToLocalChecked());
-    client = new AdminClient(connection);
+    connection = ObjectWrap<Connection<AdminClient>>::Unwrap(info[2].ToObject());
+    this->ConfigFromExisting(connection);
   } else {
-    if (!info[0]->IsObject()) {
-      return Nan::ThrowError("Global configuration data must be specified");
+    if (!info[0].IsObject()) {
+      Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException();
+      return;
     }
 
     std::string errstr;
 
-    gconfig = Conf::create(
-      RdKafka::Conf::CONF_GLOBAL,
-      (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr);
+    gconfig = Conf::create(RdKafka::Conf::CONF_GLOBAL, info[0].ToObject(), errstr);
 
     if (!gconfig) {
-      return Nan::ThrowError(errstr.c_str());
+      Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException();
+      return;
     }
 
-    client = new AdminClient(gconfig);
+    this->Config(gconfig, NULL);
   }
-
-  // Wrap it
-  client->Wrap(info.This());
-
-  // Then there is some weird initialization that happens
-  // basically it sets the configuration data
-  // we don't need to do that because we lazy load it
-
-  info.GetReturnValue().Set(info.This());
-}
-
-v8::Local<v8::Object> AdminClient::NewInstance(v8::Local<v8::Value> arg) {
-  Nan::EscapableHandleScope scope;
-
-  const unsigned argc = 1;
-
-  v8::Local<v8::Value> argv[argc] = { arg };
-  v8::Local<v8::Function> cons = Nan::New<v8::Function>(constructor);
-  v8::Local<v8::Object> instance =
-    Nan::NewInstance(cons, argc, argv).ToLocalChecked();
-
-  return scope.Escape(instance);
 }
 
 /**
@@ -311,14 +282,14 @@ Baton AdminClient::CreateTopic(rd_kafka_NewTopic_t* topic, int timeout_ms) {
     const char *errmsg = rd_kafka_topic_result_error_string(terr);
 
     if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) {
-        if (errmsg) {
-          const std::string errormsg = std::string(errmsg);
-          rd_kafka_event_destroy(event_response);
-          return Baton(static_cast<RdKafka::ErrorCode>(errcode), errormsg); // NOLINT
-        } else {
-          rd_kafka_event_destroy(event_response);
-          return Baton(static_cast<RdKafka::ErrorCode>(errcode));
-        }
+      if (errmsg) {
+        const std::string errormsg = std::string(errmsg);
+        rd_kafka_event_destroy(event_response);
+        return Baton(static_cast<RdKafka::ErrorCode>(errcode), errormsg); // NOLINT
+      } else {
+        rd_kafka_event_destroy(event_response);
+        return Baton(static_cast<RdKafka::ErrorCode>(errcode));
+      }
     }
   }
 
@@ -389,8 +360,8 @@ Baton AdminClient::DeleteTopic(rd_kafka_DeleteTopic_t* topic, int timeout_ms) {
     const rd_kafka_resp_err_t errcode = rd_kafka_topic_result_error(terr);
     if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) {
-        rd_kafka_event_destroy(event_response);
-        return Baton(static_cast<RdKafka::ErrorCode>(errcode));
+      rd_kafka_event_destroy(event_response);
+      return Baton(static_cast<RdKafka::ErrorCode>(errcode));
     }
   }
 
@@ -465,14 +436,14 @@ Baton AdminClient::CreatePartitions(
     const char *errmsg = rd_kafka_topic_result_error_string(terr);
 
     if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) {
-        if (errmsg) {
-          const std::string errormsg = std::string(errmsg);
-          rd_kafka_event_destroy(event_response);
-          return Baton(static_cast<RdKafka::ErrorCode>(errcode), errormsg); // NOLINT
-        } else {
-          rd_kafka_event_destroy(event_response);
-          return Baton(static_cast<RdKafka::ErrorCode>(errcode));
-        }
+      if (errmsg) {
+        const std::string errormsg = std::string(errmsg);
+        rd_kafka_event_destroy(event_response);
+        return Baton(static_cast<RdKafka::ErrorCode>(errcode), errormsg); // NOLINT
+      } else {
+        rd_kafka_event_destroy(event_response);
+        return Baton(static_cast<RdKafka::ErrorCode>(errcode));
+      }
     }
   }
 
@@ -497,21 +468,21 @@ Baton AdminClient::ListGroups(
   // Make admin options to establish that we are listing groups
   rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
-    m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
+
m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } if (is_match_states_set) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_match_consumer_group_states( - options, &match_states[0], match_states.size()); + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, &match_states[0], match_states.size()); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } @@ -524,7 +495,7 @@ Baton AdminClient::ListGroups( // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = PollForEvent( - rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, timeout_ms); + rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -552,9 +523,9 @@ Baton AdminClient::ListGroups( } Baton AdminClient::DescribeGroups(std::vector &groups, - bool include_authorized_operations, - int timeout_ms, - /* out */ rd_kafka_event_t **event_response) { + bool include_authorized_operations, + int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -567,21 +538,21 @@ Baton AdminClient::DescribeGroups(std::vector &groups, // Make admin options to establish that we are describing groups rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } if (include_authorized_operations) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_include_authorized_operations( - options, include_authorized_operations); + rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } @@ -595,13 +566,13 @@ Baton AdminClient::DescribeGroups(std::vector &groups, } rd_kafka_DescribeConsumerGroups(m_client->c_ptr(), &c_groups[0], - groups.size(), options, rkqu); + groups.size(), options, rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = PollForEvent( - rkqu, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, timeout_ms); + rkqu, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, timeout_ms); // Destroy the queue since we are done with it. 
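// (Aside: the create-queue / issue-request / poll sequence every admin method
// here follows, sketched against librdkafka's C API; error handling elided:)
//
//   rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
//   rd_kafka_ListConsumerGroups(rk, options, q);
//   rd_kafka_event_t *ev = rd_kafka_queue_poll(q, timeout_ms);
//   // ...PollForEvent additionally skips events until one matches the
//   // requested RD_KAFKA_EVENT_* type or the timeout expires...
//   rd_kafka_queue_destroy(q);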
rd_kafka_queue_destroy(rkqu); @@ -629,8 +600,8 @@ Baton AdminClient::DescribeGroups(std::vector &groups, } Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, - size_t group_cnt, int timeout_ms, - /* out */ rd_kafka_event_t **event_response) { + size_t group_cnt, int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -643,11 +614,11 @@ Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, // Make admin options to establish that we are deleting groups rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETEGROUPS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETEGROUPS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } @@ -656,13 +627,13 @@ Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); rd_kafka_DeleteGroups(m_client->c_ptr(), group_list, group_cnt, options, - rkqu); + rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -705,21 +676,21 @@ Baton AdminClient::ListConsumerGroupOffsets( // Make admin options to establish that we are fetching offsets rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } if (require_stable_offsets) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_require_stable_offsets( - options, require_stable_offsets); + rd_kafka_AdminOptions_set_require_stable_offsets( + options, require_stable_offsets); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } @@ -727,13 +698,13 @@ Baton AdminClient::ListConsumerGroupOffsets( rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); rd_kafka_ListConsumerGroupOffsets(m_client->c_ptr(), req, req_cnt, options, - rkqu); + rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = PollForEvent( - rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, timeout_ms); + rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, timeout_ms); // Destroy the queue since we are done with it. 
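// (Aside: the rd_kafka_AdminOptions lifecycle repeated in each method, as a
// standalone sketch; RD_KAFKA_ADMIN_OP_ANY stands in for the per-call enum:)
//
//   char errstr[512];
//   rd_kafka_AdminOptions_t *opts =
//       rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
//   if (rd_kafka_AdminOptions_set_request_timeout(opts, timeout_ms, errstr,
//                                                 sizeof(errstr)))
//     /* surface errstr to the caller (a Baton in this file) */;
//   // ...issue the request, then rd_kafka_AdminOptions_destroy(opts).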
rd_kafka_queue_destroy(rkqu); @@ -761,9 +732,9 @@ Baton AdminClient::ListConsumerGroupOffsets( } Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, - size_t del_records_cnt, - int operation_timeout_ms, int timeout_ms, - rd_kafka_event_t **event_response) { + size_t del_records_cnt, + int operation_timeout_ms, int timeout_ms, + rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -776,17 +747,17 @@ Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, // Make admin options to establish that we are deleting records rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETERECORDS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETERECORDS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } err = rd_kafka_AdminOptions_set_operation_timeout( - options, operation_timeout_ms, errstr, sizeof(errstr)); + options, operation_timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } @@ -795,13 +766,13 @@ Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); rd_kafka_DeleteRecords(m_client->c_ptr(), del_records, - del_records_cnt, options, rkqu); + del_records_cnt, options, rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_DELETERECORDS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_DELETERECORDS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -829,9 +800,9 @@ Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, } Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, - bool include_authorized_operations, - int timeout_ms, - rd_kafka_event_t **event_response) { + bool include_authorized_operations, + int timeout_ms, + rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -844,20 +815,20 @@ Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, // Make admin options to establish that we are describing topics rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); if (include_authorized_operations) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_include_authorized_operations( - options, include_authorized_operations); + rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } @@ -871,7 +842,7 @@ Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. 
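// (Aside: ownership of *event_response — the receiving worker extracts the
// typed result and must free the event itself; sketch:)
//
//   const rd_kafka_ListConsumerGroupOffsets_result_t *res =
//       rd_kafka_event_ListConsumerGroupOffsets_result(ev);
//   /* ...copy the per-group results out of res... */
//   rd_kafka_event_destroy(ev);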
*event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -900,9 +871,9 @@ Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, Baton AdminClient::ListOffsets(rd_kafka_topic_partition_list_t *partitions, - int timeout_ms, - rd_kafka_IsolationLevel_t isolation_level, - rd_kafka_event_t **event_response) { + int timeout_ms, + rd_kafka_IsolationLevel_t isolation_level, + rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -915,17 +886,17 @@ Baton AdminClient::ListOffsets(rd_kafka_topic_partition_list_t *partitions, // Make admin options to establish that we are fetching offsets rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTOFFSETS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTOFFSETS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_isolation_level(options, isolation_level); + rd_kafka_AdminOptions_set_isolation_level(options, isolation_level); if (error) { return Baton::BatonFromErrorAndDestroy(error); } @@ -939,7 +910,7 @@ Baton AdminClient::ListOffsets(rd_kafka_topic_partition_list_t *partitions, // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_LISTOFFSETS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_LISTOFFSETS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -989,10 +960,9 @@ void AdminClient::DeactivateDispatchers() { * C++ Exported prototype functions */ -NAN_METHOD(AdminClient::NodeConnect) { - Nan::HandleScope scope; - - AdminClient* client = ObjectWrap::Unwrap(info.This()); +Napi::Value AdminClient::NodeConnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); // Activate the dispatchers before the connection, as some callbacks may run // on the background thread. @@ -1000,138 +970,145 @@ NAN_METHOD(AdminClient::NodeConnect) { // Because the Admin Client connect is synchronous, we can do this within // AdminClient::Connect as well, but we do it here to keep the code similiar // to the Producer and Consumer. 
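// (Aside: the Unwrap line removed just above recurs in every method below —
// NAN prototype methods recovered the native instance with
//   AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());
// while methods registered via InstanceMethod() already run on the wrapped
// instance, so plain `this` suffices.)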
- client->ActivateDispatchers(); + this->ActivateDispatchers(); - Baton b = client->Connect(); + Baton b = this->Connect(); // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(AdminClient::NodeDisconnect) { - Nan::HandleScope scope; - - AdminClient* client = ObjectWrap::Unwrap(info.This()); +Napi::Value AdminClient::NodeDisconnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - Baton b = client->Disconnect(); + Baton b = this->Disconnect(); // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } /** * Create topic */ -NAN_METHOD(AdminClient::NodeCreateTopic) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { + if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsNumber()) { - return Nan::ThrowError("Must provide 'timeout'"); + if (!info[1].IsNumber()) { + Napi::Error::New(env, "Must provide 'timeout'").ThrowAsJavaScriptException(); + return env.Null(); } // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient* client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient* client = this; // Get the timeout - int timeout = Nan::To(info[1]).FromJust(); + int timeout = info[1].As().Int32Value(); std::string errstr; // Get that topic we want to create rd_kafka_NewTopic_t* topic = Conversion::Admin::FromV8TopicObject( - info[0].As(), errstr); + info[0].As(), errstr); if (topic == NULL) { - Nan::ThrowError(errstr.c_str()); - return; + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); } // Queue up dat work - Nan::AsyncQueueWorker( - new Workers::AdminClientCreateTopic(callback, client, topic, timeout)); - - return info.GetReturnValue().Set(Nan::Null()); + Napi::AsyncWorker* worker = new Workers::AdminClientCreateTopic(callback, client, topic, timeout); + worker->Queue(); + return env.Null(); } /** * Delete topic */ -NAN_METHOD(AdminClient::NodeDeleteTopic) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDeleteTopic(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { + if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + return ThrowError(env, "Need to specify a callback"); } - if (!info[1]->IsNumber() || !info[0]->IsString()) { - return Nan::ThrowError("Must provide 'timeout', and 'topicName'"); + if (!info[1].IsNumber() || !info[0].IsString()) { + return ThrowError(env, "Must provide 'timeout', and 'topicName'"); } // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new 
Nan::Callback(cb); - AdminClient* client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient* client = this; // Get the topic name from the string - std::string topic_name = Util::FromV8String( - Nan::To(info[0]).ToLocalChecked()); + std::string topic_name = Util::FromV8String(info[0].ToString()); // Get the timeout - int timeout = Nan::To(info[1]).FromJust(); + int timeout = info[1].As().Int32Value(); // Get that topic we want to create rd_kafka_DeleteTopic_t* topic = rd_kafka_DeleteTopic_new( topic_name.c_str()); // Queue up dat work - Nan::AsyncQueueWorker( - new Workers::AdminClientDeleteTopic(callback, client, topic, timeout)); - - return info.GetReturnValue().Set(Nan::Null()); + Napi::AsyncWorker* worker = new Workers::AdminClientDeleteTopic(callback, client, topic, timeout); + worker->Queue(); + return env.Null(); } /** * Delete topic */ -NAN_METHOD(AdminClient::NodeCreatePartitions) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); if (info.Length() < 4) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[3]->IsFunction()) { + if (!info[3].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback 2"); + Napi::Error::New(env, "Need to specify a callback 2").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[2]->IsNumber() || !info[1]->IsNumber() || !info[0]->IsString()) { - return Nan::ThrowError( + if (!info[2].IsNumber() || !info[1].IsNumber() || !info[0].IsString()) { + return ThrowError(env, "Must provide 'totalPartitions', 'timeout', and 'topicName'"); } // Create the final callback object - v8::Local cb = info[3].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient* client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[3].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient* client = this; // Get the timeout - int timeout = Nan::To(info[2]).FromJust(); + int timeout = info[2].As().Int32Value(); // Get the total number of desired partitions - int partition_total_count = Nan::To(info[1]).FromJust(); + int partition_total_count = info[1].As().Int32Value(); // Get the topic name from the string - std::string topic_name = Util::FromV8String( - Nan::To(info[0]).ToLocalChecked()); + std::string topic_name = Util::FromV8String(info[0].ToString()); // Create an error buffer we can throw char* errbuf = reinterpret_cast(malloc(100)); @@ -1143,91 +1120,101 @@ NAN_METHOD(AdminClient::NodeCreatePartitions) { // If we got a failure on the create new partitions request, // fail here if (new_partitions == NULL) { - return Nan::ThrowError(errbuf); + Napi::Error::New(env, errbuf).ThrowAsJavaScriptException(); + return env.Null(); } // Queue up dat work - Nan::AsyncQueueWorker(new Workers::AdminClientCreatePartitions( - callback, client, new_partitions, timeout)); + Napi::AsyncWorker* worker = new Workers::AdminClientCreatePartitions( + callback, client, new_partitions, timeout); - return info.GetReturnValue().Set(Nan::Null()); + worker->Queue(); + return env.Null(); } /** * List Consumer Groups. 
*/ -NAN_METHOD(AdminClient::NodeListGroups) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 2 || !info[1]->IsFunction()) { + if (info.Length() < 2 || !info[1].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsObject()) { - return Nan::ThrowError("Must provide options object"); + if (!info[0].IsObject()) { + Napi::Error::New(env, "Must provide options object").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local config = info[0].As(); + Napi::Object config = info[0].As(); // Create the final callback object - v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient *client = this; // Get the timeout - default 5000. int timeout_ms = GetParameter(config, "timeout", 5000); // Get the match states, or not if they are unset. std::vector match_states; - v8::Local match_consumer_group_states_key = - Nan::New("matchConsumerGroupStates").ToLocalChecked(); - bool is_match_states_set = - Nan::Has(config, match_consumer_group_states_key).FromMaybe(false); - v8::Local match_states_array = Nan::New(); + Napi::String match_consumer_group_states_key = + Napi::String::New(env, "matchConsumerGroupStates"); + bool is_match_states_set = config.Has(match_consumer_group_states_key); + Napi::Array match_states_array = Napi::Array::New(env); if (is_match_states_set) { - match_states_array = GetParameter>( - config, "matchConsumerGroupStates", match_states_array); - if (match_states_array->Length()) { + match_states_array = GetParameter( + config, "matchConsumerGroupStates", match_states_array); + if (match_states_array.Length()) { match_states = Conversion::Admin::FromV8GroupStateArray( - match_states_array); + match_states_array); } } // Queue the work. - Nan::AsyncQueueWorker(new Workers::AdminClientListGroups( - callback, client, is_match_states_set, match_states, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientListGroups( + callback, client, is_match_states_set, match_states, timeout_ms); + worker->Queue(); + + return env.Null(); } /** * Describe Consumer Groups. */ -NAN_METHOD(AdminClient::NodeDescribeGroups) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDescribeGroups(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { + if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + return ThrowError(env, "Need to specify a callback"); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide group name array"); + if (!info[0].IsArray()) { + return ThrowError(env, "Must provide group name array"); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Must provide options object"); + if (!info[1].IsObject()) { + return ThrowError(env, "Must provide options object"); } // Get list of group names to describe. 
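// (Aside: v8ArrayToStringVector, used on both sides of this hunk, reduces to
// roughly the following Node-API loop — a sketch; the repo's helper may
// differ in detail:)
//
//   std::vector<std::string> out;
//   for (uint32_t i = 0; i < arr.Length(); i++)
//     out.push_back(arr.Get(i).ToString().Utf8Value());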
- v8::Local group_names = info[0].As(); - if (group_names->Length() == 0) { - return Nan::ThrowError("Must provide at least one group name"); + Napi::Array group_names = info[0].As(); + if (group_names.Length() == 0) { + return ThrowError(env, "Must provide at least one group name"); } std::vector group_names_vector = v8ArrayToStringVector(group_names); - v8::Local config = info[1].As(); + Napi::Object config = info[1].As(); // Get the timeout - default 5000. int timeout_ms = GetParameter(config, "timeout", 5000); @@ -1237,40 +1224,48 @@ NAN_METHOD(AdminClient::NodeDescribeGroups) { GetParameter(config, "includeAuthorizedOperations", false); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; // Queue the work. - Nan::AsyncQueueWorker(new Workers::AdminClientDescribeGroups( + Napi::AsyncWorker *worker = new Workers::AdminClientDescribeGroups( callback, client, group_names_vector, include_authorized_operations, - timeout_ms)); + timeout_ms); + worker->Queue(); + return env.Null(); } /** * Delete Consumer Groups. */ -NAN_METHOD(AdminClient::NodeDeleteGroups) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDeleteGroups(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { + if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide group name array"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide group name array").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Must provide options object"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Must provide options object").ThrowAsJavaScriptException(); + return env.Null(); } // Get list of group names to delete, and convert it into an // rd_kafka_DeleteGroup_t array. - v8::Local group_names = info[0].As(); - if (group_names->Length() == 0) { - return Nan::ThrowError("Must provide at least one group name"); + Napi::Array group_names = info[0].As(); + if (group_names.Length() == 0) { + Napi::Error::New(env, "Must provide at least one group name").ThrowAsJavaScriptException(); + return env.Null(); } std::vector group_names_vector = v8ArrayToStringVector(group_names); @@ -1282,39 +1277,46 @@ NAN_METHOD(AdminClient::NodeDeleteGroups) { group_list[i] = rd_kafka_DeleteGroup_new(group_names_vector[i].c_str()); } - v8::Local config = info[1].As(); + Napi::Object config = info[1].As(); // Get the timeout - default 5000. int timeout_ms = GetParameter(config, "timeout", 5000); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; // Queue the work. 
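// (Aside: every Nan::AsyncQueueWorker(...) → worker->Queue() rewrite below
// leans on Napi::AsyncWorker; a minimal sketch of that contract with
// hypothetical names — the real workers are declared in src/workers.h:)
//
//   class ExampleWorker : public Napi::AsyncWorker {
//    public:
//     explicit ExampleWorker(Napi::Function& cb) : Napi::AsyncWorker(cb) {}
//     void Execute() override { /* thread-pool work; no JS calls here */ }
//     void OnOK() override { Callback().Call({Env().Null()}); }
//   };
//   (new ExampleWorker(cb))->Queue();  // worker deletes itself when done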
- Nan::AsyncQueueWorker(new Workers::AdminClientDeleteGroups( - callback, client, group_list, group_names_vector.size(), timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientDeleteGroups( + callback, client, group_list, group_names_vector.size(), timeout_ms); + worker->Queue(); + return env.Null(); } /** * List Consumer Group Offsets. */ -NAN_METHOD(AdminClient::NodeListConsumerGroupOffsets) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide an array of 'listGroupOffsets'"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide an array of 'listGroupOffsets'").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local listGroupOffsets = info[0].As(); + Napi::Array listGroupOffsets = info[0].As(); - if (listGroupOffsets->Length() == 0) { - return Nan::ThrowError("'listGroupOffsets' cannot be empty"); + if (listGroupOffsets.Length() == 0) { + Napi::Error::New(env, "'listGroupOffsets' cannot be empty").ThrowAsJavaScriptException(); + return env.Null(); } /** @@ -1324,53 +1326,51 @@ NAN_METHOD(AdminClient::NodeListConsumerGroupOffsets) { */ rd_kafka_ListConsumerGroupOffsets_t **requests = static_cast( - malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) * - listGroupOffsets->Length())); - - for (uint32_t i = 0; i < listGroupOffsets->Length(); ++i) { - v8::Local listGroupOffsetValue = - Nan::Get(listGroupOffsets, i).ToLocalChecked(); - if (!listGroupOffsetValue->IsObject()) { - return Nan::ThrowError("Each entry must be an object"); + malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) * + listGroupOffsets.Length())); + + for (uint32_t i = 0; i < listGroupOffsets.Length(); ++i) { + Napi::Value listGroupOffsetValue = + (listGroupOffsets).Get(i); + if (!listGroupOffsetValue.IsObject()) { + Napi::Error::New(env, "Each entry must be an object").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local listGroupOffsetObj = - listGroupOffsetValue.As(); - - v8::Local groupIdValue; - if (!Nan::Get(listGroupOffsetObj, Nan::New("groupId").ToLocalChecked()) - .ToLocal(&groupIdValue)) { - return Nan::ThrowError("Each entry must have 'groupId'"); + Napi::Object listGroupOffsetObj = + listGroupOffsetValue.As(); + + Napi::Value groupIdValue; + if (!(listGroupOffsetObj).Has(Napi::String::New(env, "groupId"))) { + Napi::Error::New(env, "Each entry must have 'groupId'").ThrowAsJavaScriptException(); + return env.Null(); + } else { + groupIdValue = listGroupOffsetObj.Get(Napi::String::New(env, "groupId")); } - Nan::MaybeLocal groupIdMaybe = - Nan::To(groupIdValue); - if (groupIdMaybe.IsEmpty()) { - return Nan::ThrowError("'groupId' must be a string"); - } - Nan::Utf8String groupIdUtf8(groupIdMaybe.ToLocalChecked()); - std::string groupIdStr = *groupIdUtf8; + std::string groupIdStr = groupIdValue.ToString().Utf8Value(); - v8::Local partitionsValue; rd_kafka_topic_partition_list_t *partitions = NULL; - if (Nan::Get(listGroupOffsetObj, Nan::New("partitions").ToLocalChecked()) - .ToLocal(&partitionsValue) && - partitionsValue->IsArray()) { - v8::Local partitionsArray = partitionsValue.As(); - - if 
(partitionsArray->Length() > 0) { - partitions = Conversion::TopicPartition:: - TopicPartitionv8ArrayToTopicPartitionList(partitionsArray, false); - if (partitions == NULL) { - return Nan::ThrowError( - "Failed to convert partitions to list, provide proper object in " - "partitions"); - } + Napi::MaybeOrValue partitionsValue = + listGroupOffsetObj.Get(Napi::String::New(env, "partitions")); + + + if (partitionsValue.IsArray()) { + Napi::Array partitionsArray = partitionsValue.As(); + + if (partitionsArray.Length() > 0) { + partitions = Conversion::TopicPartition:: + TopicPartitionv8ArrayToTopicPartitionList(partitionsArray, false); + if (partitions == NULL) { + return ThrowError(env, + "Failed to convert partitions to list, provide proper object in " + "partitions"); + } } } requests[i] = - rd_kafka_ListConsumerGroupOffsets_new(groupIdStr.c_str(), partitions); + rd_kafka_ListConsumerGroupOffsets_new(groupIdStr.c_str(), partitions); if (partitions != NULL) { rd_kafka_topic_partition_list_destroy(partitions); @@ -1378,48 +1378,55 @@ NAN_METHOD(AdminClient::NodeListConsumerGroupOffsets) { } // Now process the second argument: options (timeout and requireStableOffsets) - v8::Local options = info[1].As(); + Napi::Object options = info[1].As(); bool require_stable_offsets = GetParameter(options, "requireStableOffsets", false); int timeout_ms = GetParameter(options, "timeout", 5000); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; // Queue the worker to process the offset fetch request asynchronously - Nan::AsyncQueueWorker(new Workers::AdminClientListConsumerGroupOffsets( - callback, client, requests, listGroupOffsets->Length(), - require_stable_offsets, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientListConsumerGroupOffsets( + callback, client, requests, listGroupOffsets.Length(), + require_stable_offsets, timeout_ms); + worker->Queue(); + return env.Null(); } /** * Delete Records. 
*/ -NAN_METHOD(AdminClient::NodeDeleteRecords) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError( - "Must provide array containg 'TopicPartitionOffset' objects"); + if (!info[0].IsArray()) { + return ThrowError(env, + "Must provide array containg 'TopicPartitionOffset' objects"); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Must provide 'options' object"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Must provide 'options' object").ThrowAsJavaScriptException(); + return env.Null(); } // Get list of TopicPartitions to delete records from // and convert it into rd_kafka_DeleteRecords_t array - v8::Local delete_records_list = info[0].As(); + Napi::Array delete_records_list = info[0].As(); - if (delete_records_list->Length() == 0) { - return Nan::ThrowError("Must provide at least one TopicPartitionOffset"); + if (delete_records_list.Length() == 0) { + Napi::Error::New(env, "Must provide at least one TopicPartitionOffset").ThrowAsJavaScriptException(); + return env.Null(); } /** @@ -1429,55 +1436,64 @@ NAN_METHOD(AdminClient::NodeDeleteRecords) { */ rd_kafka_DeleteRecords_t **delete_records = static_cast( - malloc(sizeof(rd_kafka_DeleteRecords_t *) * 1)); + malloc(sizeof(rd_kafka_DeleteRecords_t *) * 1)); rd_kafka_topic_partition_list_t *partitions = Conversion::TopicPartition::TopicPartitionv8ArrayToTopicPartitionList( - delete_records_list, true); + delete_records_list, true); if (partitions == NULL) { - return Nan::ThrowError( - "Failed to convert objects in delete records list, provide proper " - "TopicPartitionOffset objects"); + return ThrowError(env, + "Failed to convert objects in delete records list, provide proper " + "TopicPartitionOffset objects"); } delete_records[0] = rd_kafka_DeleteRecords_new(partitions); rd_kafka_topic_partition_list_destroy(partitions); // Now process the second argument: options (timeout and operation_timeout) - v8::Local options = info[1].As(); + Napi::Object options = info[1].As(); int operation_timeout_ms = GetParameter(options, "operation_timeout", 60000); int timeout_ms = GetParameter(options, "timeout", 5000); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient *client = this; // Queue the worker to process the offset fetch request asynchronously - Nan::AsyncQueueWorker(new Workers::AdminClientDeleteRecords( - callback, client, delete_records, 1, operation_timeout_ms, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientDeleteRecords( + callback, client, delete_records, 1, operation_timeout_ms, timeout_ms); + + worker->Queue(); + return env.Null(); } /** * Describe Topics. 
*/ -NAN_METHOD(AdminClient::NodeDescribeTopics) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide an array of 'topicNames'"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide an array of 'topicNames'").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local topicNames = info[0].As(); + Napi::Array topicNames = info[0].As(); - if (topicNames->Length() == 0) { - return Nan::ThrowError("'topicNames' cannot be empty"); + if (topicNames.Length() == 0) { + Napi::Error::New(env, "'topicNames' cannot be empty").ThrowAsJavaScriptException(); + return env.Null(); } std::vector topicNamesVector = v8ArrayToStringVector(topicNames); @@ -1499,38 +1515,45 @@ NAN_METHOD(AdminClient::NodeDescribeTopics) { free(topics); - v8::Local options = info[1].As(); + Napi::Object options = info[1].As(); bool include_authorised_operations = GetParameter(options, "includeAuthorizedOperations", false); int timeout_ms = GetParameter(options, "timeout", 5000); - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; + + Napi::AsyncWorker *worker = new Workers::AdminClientDescribeTopics( + callback, client, topic_collection, include_authorised_operations, + timeout_ms); + worker->Queue(); - Nan::AsyncQueueWorker(new Workers::AdminClientDescribeTopics( - callback, client, topic_collection, - include_authorised_operations, timeout_ms)); + return env.Null(); } /** * List Offsets. 
 */
-NAN_METHOD(AdminClient::NodeListOffsets) {
-  Nan::HandleScope scope;
+Napi::Value AdminClient::NodeListOffsets(const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);
 
-  if (info.Length() < 3 || !info[2]->IsFunction()) {
-    return Nan::ThrowError("Need to specify a callback");
+  if (info.Length() < 3 || !info[2].IsFunction()) {
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }
 
-  if (!info[0]->IsArray()) {
-    return Nan::ThrowError("Must provide an array of 'TopicPartitionOffsets'");
+  if (!info[0].IsArray()) {
+    Napi::Error::New(env, "Must provide an array of 'TopicPartitionOffsets'").ThrowAsJavaScriptException();
+    return env.Null();
   }
 
-  v8::Local<v8::Array> listOffsets = info[0].As<v8::Array>();
+  Napi::Array listOffsets = info[0].As<Napi::Array>();
 
   /**
    * The ownership of this is taken by
@@ -1541,23 +1564,26 @@
       TopicPartitionOffsetSpecv8ArrayToTopicPartitionList(listOffsets);
 
   // Now process the second argument: options (timeout and isolationLevel)
-  v8::Local<v8::Object> options = info[1].As<v8::Object>();
+  Napi::Object options = info[1].As<Napi::Object>();
   rd_kafka_IsolationLevel_t isolation_level =
       static_cast<rd_kafka_IsolationLevel_t>(GetParameter(
-        options, "isolationLevel",
-        static_cast<int>(RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED)));
+          options, "isolationLevel",
+          static_cast<int>(RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED)));
   int timeout_ms = GetParameter(options, "timeout", 5000);
 
   // Create the final callback object
-  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
-  AdminClient *client = ObjectWrap::Unwrap<AdminClient>(info.This());
+  Napi::Function cb = info[2].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference();
+  callback->Reset(cb);
+  AdminClient *client = this;
 
   // Queue the worker to process the offset fetch request asynchronously
-  Nan::AsyncQueueWorker(new Workers::AdminClientListOffsets(
-      callback, client, partitions, timeout_ms, isolation_level));
+  Napi::AsyncWorker *worker = new Workers::AdminClientListOffsets(
+      callback, client, partitions, timeout_ms, isolation_level);
+  worker->Queue();
+  return env.Null();
 }
 
 }  // namespace NodeKafka
diff --git a/src/admin.h b/src/admin.h
index 9a269134..6f517ae6 100644
--- a/src/admin.h
+++ b/src/admin.h
@@ -11,7 +11,8 @@
 #ifndef SRC_ADMIN_H_
 #define SRC_ADMIN_H_
 
-#include <nan.h>
+#include <napi.h>
+#include <uv.h>
 #include <iostream>
 #include <string>
 #include <vector>
@@ -36,11 +37,11 @@ namespace NodeKafka {
 * @sa NodeKafka::Client
 */
 
-class AdminClient : public Connection {
+class AdminClient : public Connection<AdminClient> {
 public:
-  static void Init(v8::Local<v8::Object>);
-  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);
+  static void Init(const Napi::Env&, Napi::Object);
+  AdminClient(const Napi::CallbackInfo&);
 
   void ActivateDispatchers();
   void DeactivateDispatchers();
@@ -75,34 +76,32 @@ class AdminClient : public Connection {
       rd_kafka_IsolationLevel_t isolation_level,
       rd_kafka_event_t** event_response);
 
+  void ConfigFromExisting(Connection<AdminClient>* existing);
 protected:
-  static Nan::Persistent<v8::Function> constructor;
-  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
+  static Napi::FunctionReference constructor;
 
-  explicit AdminClient(Conf* globalConfig);
-  explicit AdminClient(Connection* existingConnection);
   ~AdminClient();
 
   bool is_derived = false;
 
 private:
   // Node methods
-  // static NAN_METHOD(NodeValidateTopic);
-  static NAN_METHOD(NodeCreateTopic);
-  static NAN_METHOD(NodeDeleteTopic);
-  static NAN_METHOD(NodeCreatePartitions);
+  // static Napi::Value NodeValidateTopic(const Napi::CallbackInfo& info);
+  Napi::Value NodeCreateTopic(const Napi::CallbackInfo& info);
+  Napi::Value NodeDeleteTopic(const Napi::CallbackInfo& info);
+  Napi::Value NodeCreatePartitions(const Napi::CallbackInfo& info);
 
   // Consumer group operations
-  static NAN_METHOD(NodeListGroups);
-  static NAN_METHOD(NodeDescribeGroups);
-  static NAN_METHOD(NodeDeleteGroups);
-  static NAN_METHOD(NodeListConsumerGroupOffsets);
-  static NAN_METHOD(NodeDeleteRecords);
-  static NAN_METHOD(NodeDescribeTopics);
-  static NAN_METHOD(NodeListOffsets);
-
-  static NAN_METHOD(NodeConnect);
-  static NAN_METHOD(NodeDisconnect);
+  Napi::Value NodeListGroups(const Napi::CallbackInfo& info);
+  Napi::Value NodeDescribeGroups(const Napi::CallbackInfo& info);
+  Napi::Value NodeDeleteGroups(const Napi::CallbackInfo& info);
+  Napi::Value NodeListConsumerGroupOffsets(const Napi::CallbackInfo& info);
+  Napi::Value NodeDeleteRecords(const Napi::CallbackInfo& info);
+  Napi::Value NodeDescribeTopics(const Napi::CallbackInfo& info);
+  Napi::Value NodeListOffsets(const Napi::CallbackInfo& info);
+
+  Napi::Value NodeConnect(const Napi::CallbackInfo& info);
+  Napi::Value NodeDisconnect(const Napi::CallbackInfo& info);
 };
 
 }  // namespace NodeKafka
diff --git a/src/binding.cc b/src/binding.cc
index 7b3fe77c..21d907c1 100644
--- a/src/binding.cc
+++ b/src/binding.cc
@@ -9,68 +9,72 @@
 */
 
 #include <iostream>
-#include <nan.h>
 
 #include "src/binding.h"
 
-using NodeKafka::Producer;
-using NodeKafka::KafkaConsumer;
 using NodeKafka::AdminClient;
+using NodeKafka::KafkaConsumer;
+using NodeKafka::Producer;
 using NodeKafka::Topic;
 
-using RdKafka::ErrorCode;
+using Napi::Number;
 
-NAN_METHOD(NodeRdKafkaErr2Str) {
-  int points = Nan::To<int>(info[0]).FromJust();
+Napi::Value NodeRdKafkaErr2Str(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  int points = info[0].As<Napi::Number>().Int32Value();
   // Cast to error code
   RdKafka::ErrorCode err = static_cast<RdKafka::ErrorCode>(points);
 
   std::string errstr = RdKafka::err2str(err);
 
-  info.GetReturnValue().Set(Nan::New(errstr).ToLocalChecked());
+  return Napi::String::New(env, errstr);
 }
 
-NAN_METHOD(NodeRdKafkaBuildInFeatures) {
+Napi::Value NodeRdKafkaBuildInFeatures(const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
   RdKafka::Conf * config = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
 
   std::string features;
+  Napi::Value result;
 
   if (RdKafka::Conf::CONF_OK == config->get("builtin.features", features)) {
-    info.GetReturnValue().Set(Nan::New(features).ToLocalChecked());
+    result = Napi::String::New(env, features);
   } else {
-    info.GetReturnValue().Set(Nan::Undefined());
+    result = env.Undefined();
  }
 
   delete config;
+  return result;
 }
 
-void ConstantsInit(v8::Local<v8::Object> exports) {
-  v8::Local<v8::Object> topicConstants = Nan::New<v8::Object>();
+void defconst(Napi::Env env, Napi::Object target, const char *name, Napi::Value value) {
+  target.Set(Napi::String::New(env, name), value);
+}
+
+void ConstantsInit(Napi::Env env, Napi::Object exports) {
+  Napi::Object topicConstants = Napi::Object::New(env);
 
   // RdKafka Error Code definitions
-  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::PARTITION_UA);
-  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_BEGINNING);
-  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_END);
-  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_STORED);
-  NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_INVALID);
+  defconst(env, topicConstants, "RdKafka::Topic::PARTITION_UA", Number::New(env, RdKafka::Topic::PARTITION_UA));
+  defconst(env, topicConstants, "RdKafka::Topic::OFFSET_BEGINNING", Number::New(env, RdKafka::Topic::OFFSET_BEGINNING));
+  defconst(env, topicConstants,
"RdKafka::Topic::OFFSET_END", Number::New(env, RdKafka::Topic::OFFSET_END)); + defconst(env, topicConstants, "RdKafka::Topic::OFFSET_STORED", Number::New(env, RdKafka::Topic::OFFSET_STORED)); + defconst(env, topicConstants, "RdKafka::Topic::OFFSET_INVALID", Number::New(env, RdKafka::Topic::OFFSET_INVALID)); - Nan::Set(exports, Nan::New("topic").ToLocalChecked(), topicConstants); + (exports).Set(Napi::String::New(env, "topic"), topicConstants); - Nan::Set(exports, Nan::New("err2str").ToLocalChecked(), - Nan::GetFunction(Nan::New(NodeRdKafkaErr2Str)).ToLocalChecked()); // NOLINT + (exports).Set(Napi::String::New(env, "err2str"),Napi::Function::New(env, NodeRdKafkaErr2Str)); - Nan::Set(exports, Nan::New("features").ToLocalChecked(), - Nan::GetFunction(Nan::New(NodeRdKafkaBuildInFeatures)).ToLocalChecked()); // NOLINT + (exports).Set(Napi::String::New(env, "features"), Napi::Function::New(env, NodeRdKafkaBuildInFeatures)); } -void Init(v8::Local exports, v8::Local m_, void* v_) { - KafkaConsumer::Init(exports); - Producer::Init(exports); - AdminClient::Init(exports); - Topic::Init(exports); - ConstantsInit(exports); +Napi::Object Init(Napi::Env env, Napi::Object exports) { + KafkaConsumer::Init(env, exports); + Producer::Init(env, exports); + AdminClient::Init(env, exports); + Topic::Init(env, exports); + ConstantsInit(env, exports); - Nan::Set(exports, Nan::New("librdkafkaVersion").ToLocalChecked(), - Nan::New(RdKafka::version_str().c_str()).ToLocalChecked()); + (exports).Set(Napi::String::New(env, "librdkafkaVersion"), + Napi::String::New(env, RdKafka::version_str().c_str())); + return exports; } -NODE_MODULE(kafka, Init) +NODE_API_MODULE(kafka, Init) diff --git a/src/binding.h b/src/binding.h index 0d656b10..b685575a 100644 --- a/src/binding.h +++ b/src/binding.h @@ -10,12 +10,14 @@ #ifndef SRC_BINDING_H_ #define SRC_BINDING_H_ -#include +#include +#include #include #include "rdkafkacpp.h" // NOLINT #include "src/common.h" #include "src/errors.h" #include "src/config.h" +#include "src/workers.h" #include "src/connection.h" #include "src/kafka-consumer.h" #include "src/producer.h" diff --git a/src/callbacks.cc b/src/callbacks.cc index 9e7ce892..41af5bca 100644 --- a/src/callbacks.cc +++ b/src/callbacks.cc @@ -17,35 +17,34 @@ #include "src/kafka-consumer.h" -using v8::Local; -using v8::Value; -using v8::Object; -using v8::String; -using v8::Array; -using v8::Number; +using Napi::Value; +using Napi::Object; +using Napi::String; +using Napi::Array; +using Napi::Number; namespace NodeKafka { namespace Callbacks { -v8::Local TopicPartitionListToV8Array( +Napi::Array TopicPartitionListToV8Array( std::vector parts) { - v8::Local tp_array = Nan::New(); + Napi::Array tp_array = Napi::Array::New(env); for (size_t i = 0; i < parts.size(); i++) { - v8::Local tp_obj = Nan::New(); + Napi::Object tp_obj = Napi::Object::New(env); event_topic_partition_t tp = parts[i]; - Nan::Set(tp_obj, Nan::New("topic").ToLocalChecked(), - Nan::New(tp.topic.c_str()).ToLocalChecked()); - Nan::Set(tp_obj, Nan::New("partition").ToLocalChecked(), - Nan::New(tp.partition)); + (tp_obj).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, tp.topic.c_str())); + (tp_obj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, tp.partition)); if (tp.offset >= 0) { - Nan::Set(tp_obj, Nan::New("offset").ToLocalChecked(), - Nan::New(tp.offset)); + (tp_obj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, tp.offset)); } - Nan::Set(tp_array, i, tp_obj); + (tp_array).Set(i, tp_obj); } return tp_array; @@ 
@@ -85,7 +84,7 @@ void Dispatcher::AsyncHandleCloseCallback(uv_handle_t *handle) {

 void Dispatcher::Deactivate() {
   if (async) {
-    uv_close(reinterpret_cast<uv_handle_t*>(async),
-        Dispatcher::AsyncHandleCloseCallback);
+    uv_close(reinterpret_cast<uv_handle_t*>(async),
+             Dispatcher::AsyncHandleCloseCallback);
     async = NULL;
   }
 }
@@ -100,7 +99,7 @@ void Dispatcher::Execute() {
   }
 }

-void Dispatcher::Dispatch(const int _argc, Local<Value> _argv[]) {
+void Dispatcher::Dispatch(const int _argc, Napi::Value _argv[]) {
   // This should probably be an array of v8 values
   if (!HasCallbacks()) {
     return;
@@ -111,15 +110,15 @@ void Dispatcher::Dispatch(const int _argc, Local<Value> _argv[]) {
   }
 }

-void Dispatcher::AddCallback(const v8::Local<v8::Function> &cb) {
-  Nan::Callback *value = new Nan::Callback(cb);
+void Dispatcher::AddCallback(const Napi::Function &cb) {
+  // FunctionReference has no converting constructor from Function;
+  // Napi::Persistent() creates the reference, which is then moved in.
+  Napi::FunctionReference *value =
+      new Napi::FunctionReference(Napi::Persistent(cb));
   callbacks.push_back(value);
 }

-void Dispatcher::RemoveCallback(const v8::Local<v8::Function> &cb) {
+void Dispatcher::RemoveCallback(const Napi::Function &cb) {
   for (size_t i=0; i < callbacks.size(); i++) {
-    if (callbacks[i]->GetFunction() == cb) {
-      Nan::Callback *found_callback = callbacks[i];
+    // Value() dereferences the stored reference; GetFunction() was
+    // NAN-only API. operator== is a strict-equality check.
+    if (callbacks[i]->Value() == cb) {
+      Napi::FunctionReference *found_callback = callbacks[i];
       callbacks.erase(callbacks.begin() + i);
       delete found_callback;
       break;
@@ -187,7 +186,7 @@ void EventDispatcher::Add(const event_t &e) {
 }

 void EventDispatcher::Flush() {
-  Nan::HandleScope scope;
+  Napi::HandleScope scope(env);
   // Iterate through each of the currently stored events
   // generate a callback object for each, setting to the members
   // then
@@ -202,58 +201,58 @@ void EventDispatcher::Flush() {
   }

   for (size_t i=0; i < _events.size(); i++) {
-    Local<Value> argv[argc] = {};
-    Local<Object> jsobj = Nan::New<Object>();
+    Napi::Value argv[argc] = {};
+    Napi::Object jsobj = Napi::Object::New(env);

     switch (_events[i].type) {
       case RdKafka::Event::EVENT_ERROR:
-        argv[0] = Nan::New("error").ToLocalChecked();
-        argv[1] = Nan::Error(_events[i].message.c_str());
+        argv[0] = Napi::String::New(env, "error");
+        // Napi::Error is a reference type; Value() yields the JS object.
+        argv[1] = Napi::Error::New(env, _events[i].message.c_str()).Value();

-        // if (event->err() == RdKafka::ERR__ALL_BROKERS_DOWN). Stop running
-        // This may be better suited to the node side of things
-        break;
+        // if (event->err() == RdKafka::ERR__ALL_BROKERS_DOWN). Stop running
+        // This may be better suited to the node side of things
+        break;
       case RdKafka::Event::EVENT_STATS:
-        argv[0] = Nan::New("stats").ToLocalChecked();
+        argv[0] = Napi::String::New(env, "stats");

-        Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
-          Nan::New(_events[i].message.c_str()).ToLocalChecked());
+        jsobj.Set(Napi::String::New(env, "message"),
+                  Napi::String::New(env, _events[i].message.c_str()));

-        break;
+        break;
       case RdKafka::Event::EVENT_LOG:
-        argv[0] = Nan::New("log").ToLocalChecked();
-
-        Nan::Set(jsobj, Nan::New("severity").ToLocalChecked(),
-          Nan::New(_events[i].severity));
-        Nan::Set(jsobj, Nan::New("fac").ToLocalChecked(),
-          Nan::New(_events[i].fac.c_str()).ToLocalChecked());
-        Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
-          Nan::New(_events[i].message.c_str()).ToLocalChecked());
-        Nan::Set(jsobj, Nan::New("name").ToLocalChecked(),
-          Nan::New(this->client_name.c_str()).ToLocalChecked());
-
-        break;
+        argv[0] = Napi::String::New(env, "log");
+
+        // `Napi::New(env, ...)` does not exist in node-addon-api;
+        // every value needs its concrete type.
+        jsobj.Set(Napi::String::New(env, "severity"),
+                  Napi::Number::New(env, _events[i].severity));
+        jsobj.Set(Napi::String::New(env, "fac"),
+                  Napi::String::New(env, _events[i].fac.c_str()));
+        jsobj.Set(Napi::String::New(env, "message"),
+                  Napi::String::New(env, _events[i].message.c_str()));
+        jsobj.Set(Napi::String::New(env, "name"),
+                  Napi::String::New(env, this->client_name.c_str()));
+
+        break;
       case RdKafka::Event::EVENT_THROTTLE:
-        argv[0] = Nan::New("throttle").ToLocalChecked();
+        argv[0] = Napi::String::New(env, "throttle");

-        Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
-          Nan::New(_events[i].message.c_str()).ToLocalChecked());
+        jsobj.Set(Napi::String::New(env, "message"),
+                  Napi::String::New(env, _events[i].message.c_str()));

-        Nan::Set(jsobj, Nan::New("throttleTime").ToLocalChecked(),
-          Nan::New(_events[i].throttle_time));
-        Nan::Set(jsobj, Nan::New("brokerName").ToLocalChecked(),
-          Nan::New(_events[i].broker_name).ToLocalChecked());
-        Nan::Set(jsobj, Nan::New("brokerId").ToLocalChecked(),
-          Nan::New(_events[i].broker_id));
+        jsobj.Set(Napi::String::New(env, "throttleTime"),
+                  Napi::Number::New(env, _events[i].throttle_time));
+        jsobj.Set(Napi::String::New(env, "brokerName"),
+                  Napi::String::New(env, _events[i].broker_name));
+        jsobj.Set(Napi::String::New(env, "brokerId"),
+                  Napi::Number::New(env, _events[i].broker_id));

-        break;
+        break;
       default:
-        argv[0] = Nan::New("event").ToLocalChecked();
+        argv[0] = Napi::String::New(env, "event");

-        Nan::Set(jsobj, Nan::New("message").ToLocalChecked(),
-          Nan::New(events[i].message.c_str()).ToLocalChecked());
+        // `events[i]` read the already-drained member; `_events[i]` is
+        // the snapshot this loop actually iterates.
+        jsobj.Set(Napi::String::New(env, "message"),
+                  Napi::String::New(env, _events[i].message.c_str()));

-        break;
+        break;
     }

     if (_events[i].type != RdKafka::Event::EVENT_ERROR) {
@@ -279,7 +278,7 @@ size_t DeliveryReportDispatcher::Add(const DeliveryReport &e) {
 }

 void DeliveryReportDispatcher::Flush() {
-  Nan::HandleScope scope;
+  Napi::HandleScope scope(env);

   const unsigned int argc = 2;

@@ -297,41 +296,41 @@ void DeliveryReportDispatcher::Flush() {
   }

   for (size_t i = 0; i < events_list.size(); i++) {
-    v8::Local<v8::Value> argv[argc] = {};
+    Napi::Value argv[argc] = {};

     const DeliveryReport& event = events_list[i];

     if (event.is_error) {
       // If it is an error we need the first argument to be set
-      argv[0] = Nan::New(event.error_code);
+      argv[0] = Napi::Number::New(env, event.error_code);
     } else {
-      argv[0] = Nan::Null();
+      argv[0] = env.Null();
     }

-    Local<v8::Object> jsobj(Nan::New<v8::Object>());
+    Napi::Object jsobj(Napi::Object::New(env));
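The conversions in these Flush() bodies all follow one mechanical rule: a typeless `Nan::New(x)` becomes a typed `Napi::<T>::New(env, x)`, and `env` must come from somewhere concrete (here it is assumed to be state the dispatcher keeps). A minimal sketch of that rule, with hypothetical field values:

// Sketch: typed construction replaces Nan::New's overload resolution.
void FillLogEvent(Napi::Env env, Napi::Object jsobj, int severity,
                  const std::string& fac, const std::string& message) {
  jsobj.Set("severity", Napi::Number::New(env, severity));
  jsobj.Set("fac", Napi::String::New(env, fac));
  jsobj.Set("message", Napi::String::New(env, message));
}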
Nan::New("topic").ToLocalChecked(), - Nan::New(event.topic_name).ToLocalChecked()); - Nan::Set(jsobj, Nan::New("partition").ToLocalChecked(), - Nan::New(event.partition)); - Nan::Set(jsobj, Nan::New("offset").ToLocalChecked(), - Nan::New(event.offset)); + (jsobj).Set(Napi::String::New(env, "topic"), + Napi::New(env, event.topic_name)); + (jsobj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, event.partition)); + (jsobj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, event.offset)); if (event.key) { - Nan::MaybeLocal buff = Nan::NewBuffer( - static_cast(event.key), - static_cast(event.key_len)); + Napi::MaybeLocal buff = Napi::Buffer::New(env, + static_cast(event.key), + static_cast(event.key_len)); - Nan::Set(jsobj, Nan::New("key").ToLocalChecked(), - buff.ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "key"), + buff); } else { - Nan::Set(jsobj, Nan::New("key").ToLocalChecked(), Nan::Null()); + (jsobj).Set(Napi::String::New(env, "key"), env.Null()); } if (event.opaque) { - Nan::Persistent * persistent = - static_cast *>(event.opaque); - v8::Local object = Nan::New(*persistent); - Nan::Set(jsobj, Nan::New("opaque").ToLocalChecked(), object); + Napi::Persistent * persistent = + static_cast *>(event.opaque); + Napi::Value object = Napi::New(env, *persistent); + (jsobj).Set(Napi::String::New(env, "opaque"), object); // Okay... now reset and destroy the persistent handle persistent->Reset(); @@ -341,26 +340,26 @@ void DeliveryReportDispatcher::Flush() { } if (event.timestamp > -1) { - Nan::Set(jsobj, Nan::New("timestamp").ToLocalChecked(), - Nan::New(event.timestamp)); + (jsobj).Set(Napi::String::New(env, "timestamp"), + Napi::Number::New(env, event.timestamp)); } if (event.m_include_payload) { if (event.payload) { - Nan::MaybeLocal buff = Nan::NewBuffer( - static_cast(event.payload), - static_cast(event.len)); + Napi::MaybeLocal buff = Napi::Buffer::New(env, + static_cast(event.payload), + static_cast(event.len)); - Nan::Set(jsobj, Nan::New("value").ToLocalChecked(), - buff.ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "value"), + buff); } else { - Nan::Set(jsobj, Nan::New("value").ToLocalChecked(), - Nan::Null()); + (jsobj).Set(Napi::String::New(env, "value"), + env.Null()); } } - Nan::Set(jsobj, Nan::New("size").ToLocalChecked(), - Nan::New(event.len)); + (jsobj).Set(Napi::String::New(env, "size"), + Napi::Number::New(env, event.len)); argv[1] = jsobj; @@ -418,7 +417,7 @@ DeliveryReport::DeliveryReport(RdKafka::Message &message, bool include_payload) len = message.len(); if (m_include_payload && message.payload()) { - // this pointer will be owned and freed by the Nan::NewBuffer + // this pointer will be owned and freed by the Napi::NewBuffer // created in DeliveryReportDispatcher::Flush() payload = malloc(len); memcpy(payload, message.payload(), len); @@ -464,7 +463,7 @@ void RebalanceDispatcher::Add(const rebalance_event_t &e) { } void RebalanceDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); // Iterate through each of the currently stored events // generate a callback object for each, setting to the members // then @@ -480,13 +479,13 @@ void RebalanceDispatcher::Flush() { } for (size_t i=0; i < events.size(); i++) { - v8::Local argv[argc] = {}; + Napi::Value argv[argc] = {}; if (events[i].err == RdKafka::ERR_NO_ERROR) { - argv[0] = Nan::Undefined(); + argv[0] = env.Undefined(); } else { // ERR__ASSIGN_PARTITIONS? Special case? 
Nah - argv[0] = Nan::New(events[i].err); + argv[0] = Napi::New(env, events[i].err); } std::vector parts = events[i].partitions; @@ -515,7 +514,7 @@ void OffsetCommitDispatcher::Add(const offset_commit_event_t &e) { } void OffsetCommitDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); // Iterate through each of the currently stored events // generate a callback object for each, setting to the members // then @@ -531,12 +530,12 @@ void OffsetCommitDispatcher::Flush() { } for (size_t i = 0; i < events.size(); i++) { - v8::Local argv[argc] = {}; + Napi::Value argv[argc] = {}; if (events[i].err == RdKafka::ERR_NO_ERROR) { - argv[0] = Nan::Undefined(); + argv[0] = env.Undefined(); } else { - argv[0] = Nan::New(events[i].err); + argv[0] = Napi::New(env, events[i].err); } // Now convert the TopicPartition list to a JS array @@ -560,7 +559,7 @@ void OAuthBearerTokenRefreshDispatcher::Add( } void OAuthBearerTokenRefreshDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); const unsigned int argc = 1; @@ -571,8 +570,8 @@ void OAuthBearerTokenRefreshDispatcher::Flush() { m_oauthbearer_config.clear(); } - v8::Local argv[argc] = {}; - argv[0] = Nan::New(oauthbearer_config.c_str()).ToLocalChecked(); + Napi::Value argv[argc] = {}; + argv[0] = Napi::String::New(env, oauthbearer_config.c_str()); Dispatch(argc, argv); } @@ -589,36 +588,36 @@ Partitioner::Partitioner() {} Partitioner::~Partitioner() {} int32_t Partitioner::partitioner_cb(const RdKafka::Topic *topic, - const std::string *key, - int32_t partition_cnt, - void *msg_opaque) { + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) { // Send this and get the callback and parse the int if (callback.IsEmpty()) { // default behavior return random(topic, partition_cnt); } - Local argv[3] = {}; + Napi::Value argv[3] = {}; - argv[0] = Nan::New(topic->name().c_str()).ToLocalChecked(); + argv[0] = Napi::String::New(env, topic->name().c_str()); if (key->empty()) { - argv[1] = Nan::Null(); + argv[1] = env.Null(); } else { - argv[1] = Nan::New(key->c_str()).ToLocalChecked(); + argv[1] = Napi::String::New(env, key->c_str()); } - argv[2] = Nan::New(partition_cnt); + argv[2] = Napi::Int32::New(env, partition_cnt); - v8::Local return_value = callback.Call(3, argv); + Napi::Value return_value = callback.Call(3, argv); - Nan::Maybe partition_return = Nan::To(return_value); + Napi::Maybe partition_return = return_value.As().Int32Value(); int32_t chosen_partition; if (partition_return.IsNothing()) { chosen_partition = RdKafka::Topic::PARTITION_UA; } else { - chosen_partition = partition_return.FromJust(); + chosen_partition = partition_return; } if (!topic->partition_available(chosen_partition)) { @@ -645,7 +644,7 @@ unsigned int Partitioner::random(const RdKafka::Topic *topic, int32_t max) { } } -void Partitioner::SetCallback(v8::Local cb) { +void Partitioner::SetCallback(Napi::Function cb) { callback(cb); } @@ -653,7 +652,7 @@ QueueNotEmptyDispatcher::QueueNotEmptyDispatcher() {} QueueNotEmptyDispatcher::~QueueNotEmptyDispatcher() {} void QueueNotEmptyDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); const unsigned int argc = 0; Dispatch(argc, nullptr); diff --git a/src/callbacks.h b/src/callbacks.h index 7c0427c8..4235668b 100644 --- a/src/callbacks.h +++ b/src/callbacks.h @@ -11,7 +11,8 @@ #define SRC_CALLBACKS_H_ #include -#include +#include +#include #include #include @@ -29,9 +30,9 @@ class Dispatcher { public: Dispatcher(); ~Dispatcher(); - void Dispatch(const 
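Storing a JS callback across calls is where the two APIs differ most: `Nan::Callback` becomes `Napi::FunctionReference`, which is created via `Napi::Persistent()` and read back with `Value()` or invoked with `Call()`. A self-contained sketch under those assumptions (the class and member names below are made up):

#include <napi.h>
#include <string>

class CallbackHolder {
 public:
  // Keep the JS function alive beyond the current handle scope.
  void SetCallback(Napi::Function cb) { ref_ = Napi::Persistent(cb); }

  int32_t Invoke(Napi::Env env, const std::string& topic, int32_t cnt) {
    if (ref_.IsEmpty()) return -1;  // nothing registered
    Napi::Value ret = ref_.Call(
        {Napi::String::New(env, topic), Napi::Number::New(env, cnt)});
    // Plain value access; node-addon-api has no Maybe protocol here.
    return ret.IsNumber() ? ret.As<Napi::Number>().Int32Value() : -1;
  }

 private:
  Napi::FunctionReference ref_;
};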
-  void Dispatch(const int, v8::Local<v8::Value> []);
-  void AddCallback(const v8::Local<v8::Function>&);
-  void RemoveCallback(const v8::Local<v8::Function>&);
+  void Dispatch(const int, Napi::Value []);
+  void AddCallback(const Napi::Function&);
+  void RemoveCallback(const Napi::Function&);
   bool HasCallbacks();
   virtual void Flush() = 0;
   void Execute();
@@ -39,14 +40,14 @@ class Dispatcher {
   void Deactivate();

 protected:
-  std::vector<Nan::Callback*> callbacks;  // NOLINT
+  std::vector<Napi::FunctionReference*> callbacks;  // NOLINT

   uv_mutex_t async_lock;

 private:
-  NAN_INLINE static NAUV_WORK_CB(AsyncMessage_) {
+  // Plain libuv callback; keep the old name so the uv_async_init call
+  // sites still read the same.
+  inline static void AsyncMessage_(uv_async_t *async) {
     Dispatcher *dispatcher =
-      static_cast<Dispatcher*>(async->data);
+        static_cast<Dispatcher*>(async->data);
     dispatcher->Flush();
   }

   static void AsyncHandleCloseCallback(uv_handle_t *);
@@ -165,18 +166,18 @@ struct rebalance_event_t {
   std::vector<event_topic_partition_t> partitions;

   rebalance_event_t(RdKafka::ErrorCode p_err,
-    std::vector<RdKafka::TopicPartition*> p_partitions):
-      err(p_err) {
+                    std::vector<RdKafka::TopicPartition*> p_partitions):
+                    err(p_err) {
     // Iterate over the topic partitions because we won't have them later
     for (size_t topic_partition_i = 0;
          topic_partition_i < p_partitions.size(); topic_partition_i++) {
       RdKafka::TopicPartition* topic_partition =
-        p_partitions[topic_partition_i];
+          p_partitions[topic_partition_i];

       event_topic_partition_t tp(
-        topic_partition->topic(),
-        topic_partition->partition(),
-        topic_partition->offset());
+          topic_partition->topic(),
+          topic_partition->partition(),
+          topic_partition->offset());

       partitions.push_back(tp);
     }
@@ -194,13 +195,13 @@ struct offset_commit_event_t {
     for (size_t topic_partition_i = 0;
          topic_partition_i < p_partitions.size(); topic_partition_i++) {
       RdKafka::TopicPartition* topic_partition =
-        p_partitions[topic_partition_i];
+          p_partitions[topic_partition_i];

       // Just reuse this thing because it's the same exact thing we need
       event_topic_partition_t tp(
-        topic_partition->topic(),
-        topic_partition->partition(),
-        topic_partition->offset());
+          topic_partition->topic(),
+          topic_partition->partition(),
+          topic_partition->offset());

       partitions.push_back(tp);
     }
@@ -223,8 +224,8 @@ class Rebalance : public RdKafka::RebalanceCb {
     std::vector<RdKafka::TopicPartition*> &);

   RebalanceDispatcher dispatcher;
- private:
-  v8::Persistent<v8::Function> m_cb;
 };

 class OffsetCommitDispatcher : public Dispatcher {
@@ -242,8 +243,8 @@ class OffsetCommit : public RdKafka::OffsetCommitCb {
   void offset_commit_cb(RdKafka::ErrorCode, std::vector<RdKafka::TopicPartition*> &);  // NOLINT

   OffsetCommitDispatcher dispatcher;
- private:
-  v8::Persistent<v8::Function> m_cb;
 };

 class OAuthBearerTokenRefreshDispatcher : public Dispatcher {
@@ -268,8 +269,8 @@ class Partitioner : public RdKafka::PartitionerCb {
   Partitioner();
   ~Partitioner();
   int32_t partitioner_cb( const RdKafka::Topic*, const std::string*, int32_t, void*);  // NOLINT
-  Nan::Callback callback;  // NOLINT
-  void SetCallback(v8::Local<v8::Function>);
+  Napi::FunctionReference callback;  // NOLINT
+  void SetCallback(Napi::Function);
  private:
   static unsigned int djb_hash(const char*, size_t);
   static unsigned int random(const RdKafka::Topic*, int32_t);

diff --git a/src/common.cc b/src/common.cc
index e488e02e..8a321def 100644
--- a/src/common.cc
+++ b/src/common.cc
@@ -10,7 +10,6 @@

 #include "src/common.h"

 #include <string>
-#include <nan.h>
 #include <vector>
 #include <list>

@@ -21,86 +20,86 @@
 void Log(std::string str) {
 }

 template <typename T>
-T GetParameter(v8::Local<v8::Object> object, std::string field_name, T def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
-  if (Nan::Has(object, field).FromMaybe(false)) {
-    Nan::Maybe<T> maybeT = Nan::To<T>(Nan::Get(object, field).ToLocalChecked());
-    if (maybeT.IsNothing()) {
-      return def;
-    } else {
-      return maybeT.FromJust();
-    }
-  }
-  return def;
-}
+T GetParameter(Napi::Object object, std::string field_name, T def) {
+  // The Env is recoverable from any handle, so the signature stays
+  // call-compatible with the NAN version (no extra env parameter).
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());
+  if (object.Has(field)) {
+    Napi::Value value = object.Get(field);
+    if (value.IsNumber()) {
+      // Generic numeric fallback; the specializations below cover
+      // every other type in use.
+      return static_cast<T>(value.As<Napi::Number>().DoubleValue());
+    }
+  }
+  return def;
+}

 template<>
-int64_t GetParameter<int64_t>(v8::Local<v8::Object> object,
+int64_t GetParameter<int64_t>(Napi::Object object,
                               std::string field_name, int64_t def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> v = Nan::Get(object, field).ToLocalChecked();
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());
+  if (object.Has(field)) {
+    Napi::Value v = object.Get(field);

-    if (!v->IsNumber()) {
+    if (!v.IsNumber()) {
       return def;
     }

-    Nan::Maybe<int64_t> maybeInt = Nan::To<int64_t>(v);
-    if (maybeInt.IsNothing()) {
-      return def;
-    } else {
-      return maybeInt.FromJust();
-    }
+    // Int64Value() returns the number directly; no Maybe to unwrap.
+    return v.As<Napi::Number>().Int64Value();
   }
   return def;
 }

 template<>
-bool GetParameter<bool>(v8::Local<v8::Object> object,
+bool GetParameter<bool>(Napi::Object object,
                         std::string field_name, bool def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> v = Nan::Get(object, field).ToLocalChecked();
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());
+  if (object.Has(field)) {
+    Napi::Value v = object.Get(field);

-    if (!v->IsBoolean()) {
+    if (!v.IsBoolean()) {
       return def;
     }

-    Nan::Maybe<bool> maybeInt = Nan::To<bool>(v);
-    if (maybeInt.IsNothing()) {
-      return def;
-    } else {
-      return maybeInt.FromJust();
-    }
+    return v.As<Napi::Boolean>().Value();
   }
   return def;
 }

 template<>
-int GetParameter<int>(v8::Local<v8::Object> object,
+int GetParameter<int>(Napi::Object object,
                       std::string field_name, int def) {
   return static_cast<int>(GetParameter<int64_t>(object, field_name, def));
 }

 template<>
-std::string GetParameter<std::string>(v8::Local<v8::Object> object,
-                                      std::string field_name,
-                                      std::string def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> parameter =
-      Nan::Get(object, field).ToLocalChecked();
-
-    if (!parameter->IsUndefined() && !parameter->IsNull()) {
-      v8::Local<v8::String> val = Nan::To<v8::String>(parameter)
-        .ToLocalChecked();
-
-      if (!val->IsUndefined() && !val->IsNull()) {
-        Nan::Utf8String parameterValue(val);
-        std::string parameterString(*parameterValue);
-        return parameterString;
-      }
-    }
-  }
+std::string GetParameter<std::string>(Napi::Object object,
+                                      std::string field_name,
+                                      std::string def) {
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());
+  if (object.Has(field)) {
+    Napi::Value parameter = object.Get(field);
+
+    if (!parameter.IsUndefined() && !parameter.IsNull()) {
+      // ToString() coerces like Nan::To<v8::String>; Utf8Value() copies.
+      return parameter.ToString().Utf8Value();
+    }
+  }
+  return def;
+}
@@ -109,34 +108,34 @@ std::string GetParameter(v8::Local<v8::Object> object,

 template<>
 std::vector<std::string> GetParameter<std::vector<std::string> >(
-    v8::Local<v8::Object> object, std::string field_name,
+    Napi::Object object, std::string field_name,
     std::vector<std::string> def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());

-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> maybeArray = Nan::Get(object, field).ToLocalChecked();
-    if (maybeArray->IsArray()) {
-      v8::Local<v8::Array> parameter = maybeArray.As<v8::Array>();
+  if (object.Has(field)) {
+    Napi::Value maybeArray = object.Get(field);
+    if (maybeArray.IsArray()) {
+      Napi::Array parameter = maybeArray.As<Napi::Array>();
       return v8ArrayToStringVector(parameter);
     }
   }
   return def;
 }

-std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array> parameter) {
+std::vector<std::string> v8ArrayToStringVector(Napi::Array parameter) {
   std::vector<std::string> newItem;
-  if (parameter->Length() >= 1) {
-    for (unsigned int i = 0; i < parameter->Length(); i++) {
-      v8::Local<v8::Value> v;
-      if (!Nan::Get(parameter, i).ToLocal(&v)) {
-        continue;
-      }
-      Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(v);
-      if (p.IsEmpty()) {
-        continue;
-      }
-      Nan::Utf8String pVal(p.ToLocalChecked());
-      std::string pString(*pVal);
-      newItem.push_back(pString);
-    }
-  }
+  if (parameter.Length() >= 1) {
+    for (uint32_t i = 0; i < parameter.Length(); i++) {
+      Napi::Value v = parameter.Get(i);
+      // Skip holes; ToString() coerces everything else.
+      if (v.IsUndefined() || v.IsNull()) {
+        continue;
+      }
+      newItem.push_back(v.ToString().Utf8Value());
+    }
+  }
   return newItem;
 }
@@ -144,19 +143,19 @@ std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array> parameter) {

-std::list<std::string> v8ArrayToStringList(v8::Local<v8::Array> parameter) {
+std::list<std::string> v8ArrayToStringList(Napi::Array parameter) {
   std::list<std::string> newItem;
-  if (parameter->Length() >= 1) {
-    for (unsigned int i = 0; i < parameter->Length(); i++) {
-      v8::Local<v8::Value> v;
-      if (!Nan::Get(parameter, i).ToLocal(&v)) {
-        continue;
-      }
-      Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(v);
-      if (p.IsEmpty()) {
-        continue;
-      }
-      Nan::Utf8String pVal(p.ToLocalChecked());
-      std::string pString(*pVal);
-      newItem.push_back(pString);
-    }
-  }
+  if (parameter.Length() >= 1) {
+    for (uint32_t i = 0; i < parameter.Length(); i++) {
+      Napi::Value v = parameter.Get(i);
+      if (v.IsUndefined() || v.IsNull()) {
+        continue;
+      }
+      newItem.push_back(v.ToString().Utf8Value());
+    }
+  }
   return newItem;
 }

@@ -164,16 +163,16 @@ std::list<std::string> v8ArrayToStringList(v8::Local<v8::Array> parameter) {
-template<> v8::Local<v8::Array> GetParameter<v8::Local<v8::Array> >(
-    v8::Local<v8::Object> object,
+template<> Napi::Array GetParameter<Napi::Array>(
+    Napi::Object object,
     std::string field_name,
-    v8::Local<v8::Array> def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
+    Napi::Array def) {
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());

-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> maybeArray = Nan::Get(object, field).ToLocalChecked();
-    if (maybeArray->IsArray()) {
-      v8::Local<v8::Array> parameter = maybeArray.As<v8::Array>();
+  if (object.Has(field)) {
+    Napi::Value maybeArray = object.Get(field);
+    if (maybeArray.IsArray()) {
+      Napi::Array parameter = maybeArray.As<Napi::Array>();
       return parameter;
     }
   }
   return def;
 }

@@ -184,34 +183,34 @@
 namespace Conversion {
 namespace Util {

-std::vector<std::string> ToStringVector(v8::Local<v8::Array> parameter) {
+std::vector<std::string> ToStringVector(Napi::Array parameter) {
   std::vector<std::string> newItem;
-  if (parameter->Length() >= 1) {
-    for (unsigned int i = 0; i < parameter->Length(); i++) {
-      v8::Local<v8::Value> element;
-      if (!Nan::Get(parameter, i).ToLocal(&element)) {
-        continue;
-      }
-      if (!element->IsRegExp()) {
-        Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(element);
-        if (p.IsEmpty()) {
-          continue;
-        }
-        Nan::Utf8String pVal(p.ToLocalChecked());
-        std::string pString(*pVal);
-        newItem.push_back(pString);
-      } else {
-        Nan::Utf8String pVal(element.As<v8::RegExp>()->GetSource());
-        std::string pString(*pVal);
-        Log(pString);
-        newItem.push_back(pString);
-      }
-    }
-  }
+  if (parameter.Length() >= 1) {
+    for (uint32_t i = 0; i < parameter.Length(); i++) {
+      Napi::Value element = parameter.Get(i);
+      if (element.IsUndefined() || element.IsNull()) {
+        continue;
+      }
+      // N-API has no RegExp type check; read the `source` property when
+      // one exists (a RegExp always has it) and fall back to coercion.
+      Napi::Value source = element.IsObject()
+          ? element.As<Napi::Object>().Get("source")
+          : element.Env().Undefined();
+      if (source.IsString()) {
+        std::string pString = source.As<Napi::String>().Utf8Value();
+        Log(pString);
+        newItem.push_back(pString);
+      } else {
+        newItem.push_back(element.ToString().Utf8Value());
+      }
+    }
+  }
   return newItem;
 }

@@ -219,12 +218,12 @@ std::vector<std::string> ToStringVector(v8::Local<v8::Array> parameter) {
   return newItem;
 }
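The `GetParameter` rewrites above all collapse NAN's Maybe dance into direct type checks, with `object.Env()` keeping the signatures call-compatible. A standalone sketch of the idiom for a string field (the helper name is hypothetical):

#include <napi.h>
#include <string>

std::string GetStringField(Napi::Object obj, const std::string& field,
                           const std::string& def) {
  if (!obj.Has(field)) return def;   // Has() returns bool directly
  Napi::Value v = obj.Get(field);
  if (!v.IsString()) return def;     // no Maybe, no ToLocalChecked
  return v.As<Napi::String>().Utf8Value();
}

-v8::Local<v8::Array> ToV8Array(std::vector<std::string> parameter) {
-  v8::Local<v8::Array> newItem = Nan::New<v8::Array>();
+Napi::Array ToV8Array(std::vector<std::string> parameter) {
+  Napi::Array newItem = 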
Napi::Array::New(env); for (size_t i = 0; i < parameter.size(); i++) { std::string topic = parameter[i]; - Nan::Set(newItem, i, Nan::New(topic).ToLocalChecked()); + (newItem).Set(i, Napi::String::New(env, topic)); } return newItem; @@ -234,15 +233,15 @@ v8::Local ToV8Array(std::vector parameter) { * @brief Converts a list of rd_kafka_error_t* into a v8 array of RdKafkaError * objects. */ -v8::Local ToV8Array(const rd_kafka_error_t** error_list, - size_t error_cnt) { - v8::Local errors = Nan::New(); +Napi::Array ToV8Array(const rd_kafka_error_t** error_list, + size_t error_cnt) { + Napi::Array errors = Napi::Array::New(env); for (size_t i = 0; i < error_cnt; i++) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error_list[i])); + static_cast(rd_kafka_error_code(error_list[i])); std::string msg = std::string(rd_kafka_error_string(error_list[i])); - Nan::Set(errors, i, RdKafkaError(code, msg)); + (errors).Set(i, RdKafkaError(code, msg)); } return errors; @@ -251,7 +250,7 @@ v8::Local ToV8Array(const rd_kafka_error_t** error_list, /** * @brief Converts a rd_kafka_Node_t* into a v8 object. */ -v8::Local ToV8Object(const rd_kafka_Node_t* node) { +Napi::Object ToV8Object(const rd_kafka_Node_t* node) { /* Return object type { id: number @@ -260,19 +259,19 @@ v8::Local ToV8Object(const rd_kafka_Node_t* node) { rack?: string } */ - v8::Local obj = Nan::New(); + Napi::Object obj = Napi::Object::New(env); - Nan::Set(obj, Nan::New("id").ToLocalChecked(), - Nan::New(rd_kafka_Node_id(node))); - Nan::Set(obj, Nan::New("host").ToLocalChecked(), - Nan::New(rd_kafka_Node_host(node)).ToLocalChecked()); - Nan::Set(obj, Nan::New("port").ToLocalChecked(), - Nan::New(rd_kafka_Node_port(node))); + (obj).Set(Napi::String::New(env, "id"), + Napi::Number::New(env, rd_kafka_Node_id(node))); + (obj).Set(Napi::String::New(env, "host"), + Napi::String::New(env, rd_kafka_Node_host(node))); + (obj).Set(Napi::String::New(env, "port"), + Napi::Number::New(env, rd_kafka_Node_port(node))); const char* rack = rd_kafka_Node_rack(node); if (rack) { - Nan::Set(obj, Nan::New("rack").ToLocalChecked(), - Nan::New(rack).ToLocalChecked()); + (obj).Set(Napi::String::New(env, "rack"), + Napi::String::New(env, rack)); } return obj; @@ -281,25 +280,25 @@ v8::Local ToV8Object(const rd_kafka_Node_t* node) { /** * @brief Converts a rd_kafka_Uuid_t* into a v8 object. 
 */
-v8::Local<v8::Object> UuidToV8Object(const rd_kafka_Uuid_t* uuid) {
+Napi::Object UuidToV8Object(const rd_kafka_Uuid_t* uuid) {
   /*Return object type
     {
-    mostSignificantBits: bigint
-    leastSignificantBits: bigint
-    base64: string
+      mostSignificantBits: bigint
+      leastSignificantBits: bigint
+      base64: string
     }
   */
-  v8::Local<v8::Object> obj = Nan::New<v8::Object>();
-
-  Nan::Set(obj, Nan::New("mostSignificantBits").ToLocalChecked(),
-           v8::BigInt::New(v8::Isolate::GetCurrent(),
-                           rd_kafka_Uuid_most_significant_bits(uuid)));
-  Nan::Set(obj, Nan::New("leastSignificantBits").ToLocalChecked(),
-           v8::BigInt::New(v8::Isolate::GetCurrent(),
-                           rd_kafka_Uuid_least_significant_bits(uuid)));
-  Nan::Set(
-      obj, Nan::New("base64").ToLocalChecked(),
-      Nan::New(rd_kafka_Uuid_base64str(uuid)).ToLocalChecked());
+  Napi::Object obj = Napi::Object::New(env);
+
+  // Don't reach back into raw V8 for bigints: Napi::BigInt::New takes
+  // an int64_t directly (requires NAPI_VERSION >= 6).
+  obj.Set(Napi::String::New(env, "mostSignificantBits"),
+          Napi::BigInt::New(env, rd_kafka_Uuid_most_significant_bits(uuid)));
+  obj.Set(Napi::String::New(env, "leastSignificantBits"),
+          Napi::BigInt::New(env, rd_kafka_Uuid_least_significant_bits(uuid)));
+  obj.Set(Napi::String::New(env, "base64"),
+          Napi::String::New(env, rd_kafka_Uuid_base64str(uuid)));

   return obj;
 }

@@ -307,13 +306,13 @@ v8::Local<v8::Object> UuidToV8Object(const rd_kafka_Uuid_t* uuid) {
 /**
  * @brief Converts a list of rd_kafka_AclOperation_t into a v8 array.
  */
-v8::Local<v8::Array> ToV8Array(
+Napi::Array ToV8Array(
     const rd_kafka_AclOperation_t* authorized_operations,
     size_t authorized_operations_cnt) {
-  v8::Local<v8::Array> array = Nan::New<v8::Array>();
+  Napi::Array array = Napi::Array::New(env);

   for (size_t i = 0; i < authorized_operations_cnt; i++) {
-    Nan::Set(array, i, Nan::New<v8::Number>(authorized_operations[i]));
+    array.Set(i, Napi::Number::New(env, authorized_operations[i]));
   }

   return array;
@@ -333,9 +332,9 @@ namespace TopicPartition {
  * use `ToTopicPartitionV8Array(const rd_kafka_topic_partition_list_t*,
  * bool)`.
  */
-v8::Local<v8::Array> ToV8Array(
+Napi::Array ToV8Array(
    std::vector<RdKafka::TopicPartition*> & topic_partition_list) {  // NOLINT
-  v8::Local<v8::Array> array = Nan::New<v8::Array>();
+  Napi::Array array = Napi::Array::New(env);
   for (size_t topic_partition_i = 0;
        topic_partition_i < topic_partition_list.size(); topic_partition_i++) {
     RdKafka::TopicPartition* topic_partition =
@@ -344,42 +343,42 @@ v8::Local<v8::Array> ToV8Array(
     // TODO: why do we set the entire array element to be an error rather adding
     // an error field to TopicPartition? Or create a TopicPartitionError?
     if (topic_partition->err() != RdKafka::ErrorCode::ERR_NO_ERROR) {
-      Nan::Set(array, topic_partition_i,
-        Nan::Error(Nan::New(RdKafka::err2str(topic_partition->err()))
-        .ToLocalChecked()));
+      // Napi::Error is a reference type: build it straight from the
+      // message string and store its Value().
+      array.Set(topic_partition_i,
+                Napi::Error::New(env,
+                    RdKafka::err2str(topic_partition->err())).Value());
     } else {
       // We have the list now let's get the properties from it
-      v8::Local<v8::Object> obj = Nan::New<v8::Object>();
+      Napi::Object obj = Napi::Object::New(env);

       if (topic_partition->offset() != RdKafka::Topic::OFFSET_INVALID) {
-        Nan::Set(obj, Nan::New("offset").ToLocalChecked(),
-          Nan::New<v8::Number>(topic_partition->offset()));
+        obj.Set(Napi::String::New(env, "offset"),
+                Napi::Number::New(env, topic_partition->offset()));
       }
       // If present, size >= 1, since it will include at least the
       // null terminator.
       if (topic_partition->get_metadata().size() > 0) {
-        Nan::Set(obj, Nan::New("metadata").ToLocalChecked(),
-          Nan::New(
-            reinterpret_cast<const char*>(topic_partition->get_metadata().data()),  // NOLINT
-            // null terminator is not required by the constructor.
- topic_partition->get_metadata().size() - 1) - .ToLocalChecked()); + (obj).Set(Napi::String::New(env, "metadata"), + Napi::String::New(env, + reinterpret_cast(topic_partition->get_metadata().data()), // NOLINT + // null terminator is not required by the constructor. + topic_partition->get_metadata().size() - 1) + ); } - Nan::Set(obj, Nan::New("partition").ToLocalChecked(), - Nan::New(topic_partition->partition())); - Nan::Set(obj, Nan::New("topic").ToLocalChecked(), - Nan::New(topic_partition->topic().c_str()) - .ToLocalChecked()); + (obj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, topic_partition->partition())); + (obj).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, topic_partition->topic().c_str()) + ); int leader_epoch = topic_partition->get_leader_epoch(); if (leader_epoch >= 0) { - Nan::Set(obj, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (obj).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } - Nan::Set(array, topic_partition_i, obj); + (array).Set(topic_partition_i, obj); } } @@ -398,41 +397,41 @@ v8::Local ToV8Array( * array elements, unlike the `ToV8Array(std::vector & * topic_partition_list)`. */ -v8::Local ToTopicPartitionV8Array( +Napi::Array ToTopicPartitionV8Array( const rd_kafka_topic_partition_list_t* topic_partition_list, bool include_offset) { - v8::Local array = Nan::New(); + Napi::Array array = Napi::Array::New(env); for (int topic_partition_i = 0; topic_partition_i < topic_partition_list->cnt; topic_partition_i++) { rd_kafka_topic_partition_t topic_partition = - topic_partition_list->elems[topic_partition_i]; - v8::Local obj = Nan::New(); + topic_partition_list->elems[topic_partition_i]; + Napi::Object obj = Napi::Object::New(env); - Nan::Set(obj, Nan::New("partition").ToLocalChecked(), - Nan::New(topic_partition.partition)); - Nan::Set(obj, Nan::New("topic").ToLocalChecked(), - Nan::New(topic_partition.topic).ToLocalChecked()); + (obj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, topic_partition.partition)); + (obj).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, topic_partition.topic)); if (topic_partition.err != RD_KAFKA_RESP_ERR_NO_ERROR) { - v8::Local error = NodeKafka::RdKafkaError( - static_cast(topic_partition.err)); - Nan::Set(obj, Nan::New("error").ToLocalChecked(), error); + Napi::Object error = NodeKafka::RdKafkaError( + static_cast(topic_partition.err)); + (obj).Set(Napi::String::New(env, "error"), error); } if (include_offset) { - Nan::Set(obj, Nan::New("offset").ToLocalChecked(), - Nan::New(topic_partition.offset)); + (obj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, topic_partition.offset)); } int leader_epoch = - rd_kafka_topic_partition_get_leader_epoch(&topic_partition); + rd_kafka_topic_partition_get_leader_epoch(&topic_partition); if (leader_epoch >= 0) { - Nan::Set(obj, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (obj).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } - Nan::Set(array, topic_partition_i, obj); + (array).Set(topic_partition_i, obj); } return array; } @@ -445,21 +444,21 @@ v8::Local ToTopicPartitionV8Array( * @note You must delete all the pointers inside here when you are done!! */ std::vector FromV8Array( - const v8::Local & topic_partition_list) { + const Napi::Array & topic_partition_list) { // NOTE: ARRAY OF POINTERS! 
DELETE THEM WHEN YOU ARE FINISHED
   std::vector<RdKafka::TopicPartition*> array;

   for (size_t topic_partition_i = 0;
-       topic_partition_i < topic_partition_list->Length(); topic_partition_i++) {
-    v8::Local<v8::Value> topic_partition_value;
-    if (!Nan::Get(topic_partition_list, topic_partition_i)
-        .ToLocal(&topic_partition_value)) {
+       topic_partition_i < topic_partition_list.Length(); topic_partition_i++) {
+    // Get() returns the value directly in node-addon-api; the NAN
+    // ToLocal() dance becomes a simple hole/undefined check.
+    Napi::Value topic_partition_value =
+        topic_partition_list.Get(topic_partition_i);
+    if (topic_partition_value.IsUndefined()) {
       continue;
     }

-    if (topic_partition_value->IsObject()) {
+    if (topic_partition_value.IsObject()) {
       array.push_back(FromV8Object(
-        Nan::To<v8::Object>(topic_partition_value).ToLocalChecked()));
+          topic_partition_value.As<Napi::Object>()));
     }
   }

@@ -473,27 +472,27 @@ std::vector<RdKafka::TopicPartition*> FromV8Array(
  * offset?: number}] to a rd_kafka_topic_partition_list_t
  */
 rd_kafka_topic_partition_list_t* TopicPartitionv8ArrayToTopicPartitionList(
-    v8::Local<v8::Array> parameter, bool include_offset) {
+    Napi::Array parameter, bool include_offset) {
   rd_kafka_topic_partition_list_t* newList =
-      rd_kafka_topic_partition_list_new(parameter->Length());
+      rd_kafka_topic_partition_list_new(parameter.Length());

-  for (unsigned int i = 0; i < parameter->Length(); i++) {
-    v8::Local<v8::Value> v;
-    if (!Nan::Get(parameter, i).ToLocal(&v)) {
+  for (uint32_t i = 0; i < parameter.Length(); i++) {
+    Napi::Value v = parameter.Get(i);
+    if (v.IsUndefined()) {
       continue;
     }

-    if (!v->IsObject()) {
+    if (!v.IsObject()) {
       return NULL;  // Return NULL to indicate an error
     }

-    v8::Local<v8::Object> item = v.As<v8::Object>();
+    Napi::Object item = v.As<Napi::Object>();

     std::string topic = GetParameter<std::string>(item, "topic", "");
     int partition = GetParameter<int>(item, "partition", -1);

     rd_kafka_topic_partition_t* toppar =
         rd_kafka_topic_partition_list_add(newList, topic.c_str(), partition);

     if (include_offset) {
       int64_t offset = GetParameter<int64_t>(item, "offset", 0);
@@ -506,37 +505,37 @@ rd_kafka_topic_partition_list_t* TopicPartitionv8ArrayToTopicPartitionList(
 /**
  * @brief v8 Array of Topic Partitions with offsetspec to
  *        rd_kafka_topic_partition_list_t
- * 
+ *
  * @note Converts a v8 array of type [{topic: string, partition: number,
  * offset: {timestamp: number}}] to a rd_kafka_topic_partition_list_t
  */
 rd_kafka_topic_partition_list_t*
 TopicPartitionOffsetSpecv8ArrayToTopicPartitionList(
-    v8::Local<v8::Array> parameter) {
+    Napi::Array parameter) {
   rd_kafka_topic_partition_list_t* newList =
-      rd_kafka_topic_partition_list_new(parameter->Length());
+      rd_kafka_topic_partition_list_new(parameter.Length());

-  for (unsigned int i = 0; i < parameter->Length(); i++) {
-    v8::Local<v8::Value> v;
-    if (!Nan::Get(parameter, i).ToLocal(&v)) {
+  for (uint32_t i = 0; i < parameter.Length(); i++) {
+    Napi::Value v = parameter.Get(i);
+    if (v.IsUndefined()) {
       continue;
     }

-    if (!v->IsObject()) {
+    if (!v.IsObject()) {
       return NULL;  // Return NULL to indicate an error
     }

-    v8::Local<v8::Object> item = v.As<v8::Object>();
+    Napi::Object item = v.As<Napi::Object>();

     std::string topic = GetParameter<std::string>(item, "topic", "");
     int partition = GetParameter<int>(item, "partition", -1);

     rd_kafka_topic_partition_t* toppar =
         rd_kafka_topic_partition_list_add(newList, topic.c_str(), partition);

-    v8::Local<v8::Value> offsetValue =
-        Nan::Get(item, Nan::New("offset").ToLocalChecked()).ToLocalChecked();
-    v8::Local<v8::Object> offsetObject = offsetValue.As<v8::Object>();
+    // Get(const char*) avoids materialising a key string first.
+    Napi::Object offsetObject = item.Get("offset").As<Napi::Object>();

     int64_t offset = GetParameter<int64_t>(offsetObject, "timestamp", 0);

     toppar->offset = offset;
@@ -548,7 +547,7 @@
 /**
  * @brief v8::Object to RdKafka::TopicPartition
  *
 */
-RdKafka::TopicPartition * FromV8Object(v8::Local<v8::Object> topic_partition) {
+RdKafka::TopicPartition * FromV8Object(Napi::Object topic_partition) {
   std::string topic = GetParameter<std::string>(topic_partition, "topic", "");
   int partition = GetParameter<int>(topic_partition, "partition", -1);
   int64_t offset = GetParameter<int64_t>(topic_partition, "offset", 0);
@@ -564,31 +563,31 @@ return NULL;

   RdKafka::TopicPartition *toppar =
       RdKafka::TopicPartition::create(topic, partition, offset);

-  v8::Local<v8::String> metadataKey = Nan::New("metadata").ToLocalChecked();
-  if (Nan::Has(topic_partition, metadataKey).FromMaybe(false)) {
-    v8::Local<v8::Value> metadataValue =
-        Nan::Get(topic_partition, metadataKey).ToLocalChecked();
+  Napi::String metadataKey =
+      Napi::String::New(topic_partition.Env(), "metadata");
+  if (topic_partition.Has(metadataKey)) {
+    Napi::Value metadataValue = topic_partition.Get(metadataKey);

-    if (metadataValue->IsString()) {
-      Nan::Utf8String metadataValueUtf8Str(metadataValue.As<v8::String>());
-      std::string metadataValueStr(*metadataValueUtf8Str);
+    if (metadataValue.IsString()) {
+      std::string metadataValueStr =
+          metadataValue.As<Napi::String>().Utf8Value();
       std::vector<char> metadataVector(metadataValueStr.begin(),
                                        metadataValueStr.end());
       metadataVector.push_back(
           '\0');  // The null terminator is not included in the iterator.
       toppar->set_metadata(metadataVector);
     }
   }

   toppar->set_leader_epoch(-1);
-  v8::Local<v8::String> leaderEpochKey =
-      Nan::New("leaderEpoch").ToLocalChecked();
-  if (Nan::Has(topic_partition, leaderEpochKey).FromMaybe(false)) {
-    v8::Local<v8::Value> leaderEpochValue =
-        Nan::Get(topic_partition, leaderEpochKey).ToLocalChecked();
-
-    if (leaderEpochValue->IsNumber()) {
-      int32_t leaderEpoch = Nan::To<int32_t>(leaderEpochValue).FromJust();
+  Napi::String leaderEpochKey =
+      Napi::String::New(topic_partition.Env(), "leaderEpoch");
+  if (topic_partition.Has(leaderEpochKey)) {
+    Napi::Value leaderEpochValue = topic_partition.Get(leaderEpochKey);
+
+    if (leaderEpochValue.IsNumber()) {
+      int32_t leaderEpoch = leaderEpochValue.As<Napi::Number>().Int32Value();
       toppar->set_leader_epoch(leaderEpoch);
     }
   }

@@ -604,11 +603,11 @@ namespace Metadata {
 /**
  * @brief RdKafka::Metadata to v8::Object
  *
 */
-v8::Local<v8::Object> ToV8Object(RdKafka::Metadata* metadata) {
-  v8::Local<v8::Object> obj = Nan::New<v8::Object>();
+Napi::Object ToV8Object(RdKafka::Metadata* metadata) {
+  Napi::Object obj = Napi::Object::New(env);

-  v8::Local<v8::Array> broker_data = Nan::New<v8::Array>();
-  v8::Local<v8::Array> topic_data = Nan::New<v8::Array>();
+  Napi::Array broker_data = Napi::Array::New(env);
+  Napi::Array topic_data = Napi::Array::New(env);

   const BrokerMetadataList* brokers = metadata->brokers();  // NOLINT

@@ -620,16 +619,16 @@ Napi::Object ToV8Object(RdKafka::Metadata* metadata) {

     const RdKafka::BrokerMetadata* x = *it;

-    v8::Local<v8::Object> current_broker = Nan::New<v8::Object>();
+    Napi::Object current_broker = Napi::Object::New(env);

-    Nan::Set(current_broker, Nan::New("id").ToLocalChecked(),
-      Nan::New<v8::Number>(x->id()));
-    Nan::Set(current_broker, Nan::New("host").ToLocalChecked(),
-      Nan::New(x->host().c_str()).ToLocalChecked());
-    Nan::Set(current_broker, Nan::New("port").ToLocalChecked(),
-      Nan::New<v8::Number>(x->port()));
+    current_broker.Set(Napi::String::New(env, "id"),
+                       Napi::Number::New(env, x->id()));
+    current_broker.Set(Napi::String::New(env, "host"),
+                       Napi::String::New(env, x->host().c_str()));
+    current_broker.Set(Napi::String::New(env, "port"),
+                       Napi::Number::New(env, x->port()));

-    Nan::Set(broker_data, broker_i, current_broker);
+    broker_data.Set(broker_i, current_broker);
   }

   unsigned int topic_i = 0;
@@ -642,12 +641,12 @@ Napi::Object ToV8Object(RdKafka::Metadata* metadata) {

     const RdKafka::TopicMetadata* x = *it;
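`FromV8Array` still returns raw heap pointers, so its callers keep the delete-everything contract shouted in the comment above. A hedged usage sketch (`js_array` is assumed to be a `Napi::Array` already in hand):

std::vector<RdKafka::TopicPartition*> toppars =
    NodeKafka::Conversion::TopicPartition::FromV8Array(js_array);

// ... hand toppars to librdkafka ...

// Every element came from RdKafka::TopicPartition::create();
// librdkafka's own helper deletes them all in one call.
RdKafka::TopicPartition::destroy(toppars);

-    v8::Local<v8::Object> current_topic = 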
Nan::New(); + Napi::Object current_topic = Napi::Object::New(env); - Nan::Set(current_topic, Nan::New("name").ToLocalChecked(), - Nan::New(x->topic().c_str()).ToLocalChecked()); + (current_topic).Set(Napi::String::New(env, "name"), + Napi::String::New(env, x->topic().c_str())); - v8::Local current_topic_partitions = Nan::New(); + Napi::Array current_topic_partitions = Napi::Array::New(env); const PartitionMetadataList* current_partition_data = x->partitions(); @@ -659,12 +658,12 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { // partition iterate const RdKafka::PartitionMetadata* xx = *itt; - v8::Local current_partition = Nan::New(); + Napi::Object current_partition = Napi::Object::New(env); - Nan::Set(current_partition, Nan::New("id").ToLocalChecked(), - Nan::New(xx->id())); - Nan::Set(current_partition, Nan::New("leader").ToLocalChecked(), - Nan::New(xx->leader())); + (current_partition).Set(Napi::String::New(env, "id"), + Napi::Number::New(env, xx->id())); + (current_partition).Set(Napi::String::New(env, "leader"), + Napi::Number::New(env, xx->leader())); const std::vector * replicas = xx->replicas(); const std::vector * isrs = xx->isrs(); @@ -675,40 +674,40 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { unsigned int r_i = 0; unsigned int i_i = 0; - v8::Local current_replicas = Nan::New(); + Napi::Array current_replicas = Napi::Array::New(env); for (r_it = replicas->begin(); r_it != replicas->end(); ++r_it, r_i++) { - Nan::Set(current_replicas, r_i, Nan::New(*r_it)); + (current_replicas).Set(r_i, Napi::Int32::New(env, *r_it)); } - v8::Local current_isrs = Nan::New(); + Napi::Array current_isrs = Napi::Array::New(env); for (i_it = isrs->begin(); i_it != isrs->end(); ++i_it, i_i++) { - Nan::Set(current_isrs, i_i, Nan::New(*i_it)); + (current_isrs).Set(i_i, Napi::Int32::New(env, *i_it)); } - Nan::Set(current_partition, Nan::New("replicas").ToLocalChecked(), - current_replicas); - Nan::Set(current_partition, Nan::New("isrs").ToLocalChecked(), - current_isrs); + (current_partition).Set(Napi::String::New(env, "replicas"), + current_replicas); + (current_partition).Set(Napi::String::New(env, "isrs"), + current_isrs); - Nan::Set(current_topic_partitions, partition_i, current_partition); + (current_topic_partitions).Set(partition_i, current_partition); } // iterate over partitions - Nan::Set(current_topic, Nan::New("partitions").ToLocalChecked(), + (current_topic).Set(Napi::String::New(env, "partitions"), current_topic_partitions); - Nan::Set(topic_data, topic_i, current_topic); + (topic_data).Set(topic_i, current_topic); } // End iterating over topics - Nan::Set(obj, Nan::New("orig_broker_id").ToLocalChecked(), - Nan::New(metadata->orig_broker_id())); + (obj).Set(Napi::String::New(env, "orig_broker_id"), + Napi::Number::New(env, metadata->orig_broker_id())); - Nan::Set(obj, Nan::New("orig_broker_name").ToLocalChecked(), - Nan::New(metadata->orig_broker_name()).ToLocalChecked()); + (obj).Set(Napi::String::New(env, "orig_broker_name"), + Napi::String::New(env, metadata->orig_broker_name())); - Nan::Set(obj, Nan::New("topics").ToLocalChecked(), topic_data); - Nan::Set(obj, Nan::New("brokers").ToLocalChecked(), broker_data); + (obj).Set(Napi::String::New(env, "topics"), topic_data); + (obj).Set(Napi::String::New(env, "brokers"), broker_data); return obj; } @@ -718,75 +717,74 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { namespace Message { // Overload for all use cases except delivery reports -v8::Local ToV8Object(RdKafka::Message *message) { +Napi::Object 
ToV8Object(Napi::Env env, RdKafka::Message *message) {
+  return ToV8Object(env, message, true, true);
 }

-v8::Local<v8::Object> ToV8Object(RdKafka::Message *message,
-                                 bool include_payload,
-                                 bool include_headers) {
+// `env` is threaded through explicitly; node-addon-api has no
+// equivalent of NAN's current-context lookup.
+Napi::Object ToV8Object(Napi::Env env, RdKafka::Message *message,
+                        bool include_payload,
+                        bool include_headers) {
   if (message->err() == RdKafka::ERR_NO_ERROR) {
-    v8::Local<v8::Object> pack = Nan::New<v8::Object>();
+    Napi::Object pack = Napi::Object::New(env);

     const void* message_payload = message->payload();

     if (!include_payload) {
-      Nan::Set(pack, Nan::New<v8::String>("value").ToLocalChecked(),
-        Nan::Undefined());
+      pack.Set(Napi::String::New(env, "value"), env.Undefined());
     } else if (message_payload) {
-      Nan::Set(pack, Nan::New<v8::String>("value").ToLocalChecked(),
-        Nan::Encode(message_payload, message->len(), Nan::Encoding::BUFFER));
+      // There is no Napi::Encode; a copying Buffer is the equivalent of
+      // Nan::Encode(..., Nan::Encoding::BUFFER).
+      pack.Set(Napi::String::New(env, "value"),
+               Napi::Buffer<char>::Copy(env,
+                   static_cast<const char*>(message_payload),
+                   message->len()));
     } else {
-      Nan::Set(pack, Nan::New<v8::String>("value").ToLocalChecked(),
-        Nan::Null());
+      pack.Set(Napi::String::New(env, "value"), env.Null());
     }

     RdKafka::Headers* headers;
     if (((headers = message->headers()) != 0) && include_headers) {
-      v8::Local<v8::Array> v8headers = Nan::New<v8::Array>();
+      Napi::Array v8headers = Napi::Array::New(env);
       int index = 0;
       std::vector<RdKafka::Headers::Header> all = headers->get_all();
       for (std::vector<RdKafka::Headers::Header>::iterator it = all.begin();
            it != all.end(); it++) {
-        v8::Local<v8::Object> v8header = Nan::New<v8::Object>();
-        Nan::Set(v8header, Nan::New<v8::String>(it->key()).ToLocalChecked(),
-          Nan::Encode(it->value_string(),
-            it->value_size(), Nan::Encoding::BUFFER));
-        Nan::Set(v8headers, index, v8header);
-        index++;
+        Napi::Object v8header = Napi::Object::New(env);
+        v8header.Set(Napi::String::New(env, it->key()),
+                     Napi::Buffer<char>::Copy(env, it->value_string(),
+                                              it->value_size()));
+        v8headers.Set(index, v8header);
+        index++;
       }
-      Nan::Set(pack,
-        Nan::New<v8::String>("headers").ToLocalChecked(), v8headers);
+      pack.Set(Napi::String::New(env, "headers"), v8headers);
     }

-    Nan::Set(pack, Nan::New<v8::String>("size").ToLocalChecked(),
-      Nan::New<v8::Number>(message->len()));
+    pack.Set(Napi::String::New(env, "size"),
+             Napi::Number::New(env, message->len()));

     const void* key_payload = message->key_pointer();

     if (key_payload) {
       // We want this to also be a buffer to avoid corruption
       // https://github.com/confluentinc/confluent-kafka-javascript/issues/208
-      Nan::Set(pack, Nan::New<v8::String>("key").ToLocalChecked(),
-        Nan::Encode(key_payload, message->key_len(), Nan::Encoding::BUFFER));
+      pack.Set(Napi::String::New(env, "key"),
+               Napi::Buffer<char>::Copy(env,
+                   static_cast<const char*>(key_payload),
+                   message->key_len()));
     } else {
-      Nan::Set(pack, Nan::New<v8::String>("key").ToLocalChecked(),
-        Nan::Null());
+      pack.Set(Napi::String::New(env, "key"), env.Null());
     }

-    Nan::Set(pack, Nan::New<v8::String>("topic").ToLocalChecked(),
-      Nan::New<v8::String>(message->topic_name()).ToLocalChecked());
-    Nan::Set(pack, Nan::New<v8::String>("offset").ToLocalChecked(),
-      Nan::New<v8::Number>(message->offset()));
-    Nan::Set(pack, Nan::New<v8::String>("partition").ToLocalChecked(),
-      Nan::New<v8::Number>(message->partition()));
-    Nan::Set(pack, Nan::New<v8::String>("timestamp").ToLocalChecked(),
-      Nan::New<v8::Number>(message->timestamp().timestamp));
+    pack.Set(Napi::String::New(env, "topic"),
+             Napi::String::New(env, message->topic_name()));
+    pack.Set(Napi::String::New(env, "offset"),
+             Napi::Number::New(env, message->offset()));
+    pack.Set(Napi::String::New(env, "partition"),
+             Napi::Number::New(env, message->partition()));
+    pack.Set(Napi::String::New(env, "timestamp"),
+             Napi::Number::New(env, message->timestamp().timestamp));

     int32_t leader_epoch = message->leader_epoch();
     if (leader_epoch >= 0) {
-      Nan::Set(pack, Nan::New("leaderEpoch").ToLocalChecked(),
-        Nan::New<v8::Number>(leader_epoch));
+      pack.Set(Napi::String::New(env, "leaderEpoch"),
+               Napi::Number::New(env, leader_epoch));
     }

     return pack;
@@ -809,7 +807,7 @@ namespace Admin {
  *
  */
 rd_kafka_NewTopic_t* FromV8TopicObject(
-    v8::Local<v8::Object> object, std::string &errstr) {  // NOLINT
+    Napi::Object object, std::string &errstr) {  // NOLINT
+  Napi::Env env = object.Env();
   std::string topic_name = GetParameter<std::string>(object, "topic", "");
   int num_partitions = GetParameter<int>(object, "num_partitions", 0);
   int replication_factor = GetParameter<int>(object, "replication_factor", 0);
@@ -830,44 +828,44 @@ rd_kafka_NewTopic_t* FromV8TopicObject(

   rd_kafka_resp_err_t err;

-  if (Nan::Has(object, Nan::New("config").ToLocalChecked()).FromMaybe(false)) {
+  if (object.Has(Napi::String::New(env, "config"))) {
     // Get the config v8::Object that we can get parameters on
-    v8::Local<v8::Object> config =
-        Nan::Get(object, Nan::New("config").ToLocalChecked())
-            .ToLocalChecked().As<v8::Object>();
+    Napi::Object config =
+        object.Get(Napi::String::New(env, "config")).As<Napi::Object>();

     // Get all of the keys of the object
-    v8::MaybeLocal<v8::Array> config_keys = Nan::GetOwnPropertyNames(config);
-    if (!config_keys.IsEmpty()) {
-      v8::Local<v8::Array> field_array = config_keys.ToLocalChecked();
-      for (size_t i = 0; i < field_array->Length(); i++) {
-        v8::Local<v8::String> config_key = Nan::Get(field_array, i)
-            .ToLocalChecked().As<v8::String>();
-        v8::Local<v8::Value> config_value = Nan::Get(config, config_key)
-            .ToLocalChecked();
-
-        // If the config value is a string...
-        if (config_value->IsString()) {
-          Nan::Utf8String pKeyVal(config_key);
-          std::string pKeyString(*pKeyVal);
-
-          Nan::Utf8String pValueVal(config_value.As<v8::String>());
-          std::string pValString(*pValueVal);
-
-          err = rd_kafka_NewTopic_set_config(
-              new_topic, pKeyString.c_str(), pValString.c_str());
-
-          if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
-            errstr = rd_kafka_err2str(err);
-            rd_kafka_NewTopic_destroy(new_topic);
-            return NULL;
-          }
-        } else {
-          errstr = "Config values must all be provided as strings.";
-          rd_kafka_NewTopic_destroy(new_topic);
-          return NULL;
-        }
-      }
-    }
+    // GetPropertyNames() is the node-addon-api counterpart of
+    // Nan::GetOwnPropertyNames (it also walks enumerable properties).
+    Napi::Array field_array = config.GetPropertyNames();
+    for (uint32_t i = 0; i < field_array.Length(); i++) {
+      Napi::String config_key = field_array.Get(i).As<Napi::String>();
+      Napi::Value config_value = config.Get(config_key);
+
+      // If the config value is a string...
+      if (config_value.IsString()) {
+        std::string pKeyString = config_key.Utf8Value();
+        std::string pValString = config_value.As<Napi::String>().Utf8Value();
+
+        err = rd_kafka_NewTopic_set_config(
+            new_topic, pKeyString.c_str(), pValString.c_str());
+
+        if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+          errstr = rd_kafka_err2str(err);
+          rd_kafka_NewTopic_destroy(new_topic);
+          return NULL;
+        }
+      } else {
+        errstr = "Config values must all be provided as strings.";
+        rd_kafka_NewTopic_destroy(new_topic);
+        return NULL;
+      }
+    }
   }

@@ -875,7 +873,7 @@ rd_kafka_NewTopic_t* FromV8TopicObject(
   return new_topic;
 }

-rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local<v8::Array>) {
+rd_kafka_NewTopic_t** FromV8TopicObjectArray(Napi::Array) {
   return NULL;
 }

@@ -884,25 +882,25 @@ rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local<v8::Array>) {
 /**
  * @brief Converts a v8 array of consumer group states to a list of
  * rd_kafka_consumer_group_state_t.
*/ std::vector FromV8GroupStateArray( - v8::Local array) { - v8::Local parameter = array.As(); + Napi::Array array) { + Napi::Array parameter = array.As(); std::vector returnVec; if (parameter->Length() >= 1) { for (unsigned int i = 0; i < parameter->Length(); i++) { - v8::Local v; - if (!Nan::Get(parameter, i).ToLocal(&v)) { - continue; + Napi::Value v; + if (!(parameter).Get(i).ToLocal(&v)) { + continue; } - Nan::Maybe maybeT = Nan::To(v); + Napi::Maybe maybeT = v.As().Int64Value(); if (maybeT.IsNothing()) { - continue; + continue; } - int64_t state_number = maybeT.FromJust(); + int64_t state_number = maybeT; if (state_number >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) { - continue; + continue; } returnVec.push_back( - static_cast(state_number)); + static_cast(state_number)); } } return returnVec; @@ -911,108 +909,108 @@ std::vector FromV8GroupStateArray( /** * @brief Converts a rd_kafka_ListConsumerGroups_result_t* into a v8 object. */ -v8::Local FromListConsumerGroupsResult( +Napi::Object FromListConsumerGroupsResult( const rd_kafka_ListConsumerGroups_result_t* result) { /* Return object type: { groups: { - groupId: string, - protocolType: string, - isSimpleConsumerGroup: boolean, - state: ConsumerGroupState (internally a number) + groupId: string, + protocolType: string, + isSimpleConsumerGroup: boolean, + state: ConsumerGroupState (internally a number) }[], errors: LibrdKafkaError[] } */ - v8::Local returnObject = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); size_t error_cnt; const rd_kafka_error_t** error_list = rd_kafka_ListConsumerGroups_result_errors(result, &error_cnt); - Nan::Set(returnObject, Nan::New("errors").ToLocalChecked(), - Conversion::Util::ToV8Array(error_list, error_cnt)); + (returnObject).Set(Napi::String::New(env, "errors"), + Conversion::Util::ToV8Array(error_list, error_cnt)); - v8::Local groups = Nan::New(); + Napi::Array groups = Napi::Array::New(env); size_t groups_cnt; const rd_kafka_ConsumerGroupListing_t** groups_list = rd_kafka_ListConsumerGroups_result_valid(result, &groups_cnt); for (size_t i = 0; i < groups_cnt; i++) { const rd_kafka_ConsumerGroupListing_t* group = groups_list[i]; - v8::Local groupObject = Nan::New(); + Napi::Object groupObject = Napi::Object::New(env); - Nan::Set(groupObject, Nan::New("groupId").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupListing_group_id(group)) - .ToLocalChecked()); + (groupObject).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, rd_kafka_ConsumerGroupListing_group_id(group)) + ); bool is_simple = - rd_kafka_ConsumerGroupListing_is_simple_consumer_group(group); - Nan::Set(groupObject, Nan::New("isSimpleConsumerGroup").ToLocalChecked(), - Nan::New(is_simple)); + rd_kafka_ConsumerGroupListing_is_simple_consumer_group(group); + (groupObject).Set(Napi::String::New(env, "isSimpleConsumerGroup"), + Napi::Boolean::New(env, is_simple)); std::string protocol_type = is_simple ? 
"simple" : "consumer"; - Nan::Set(groupObject, Nan::New("protocolType").ToLocalChecked(), - Nan::New(protocol_type).ToLocalChecked()); + (groupObject).Set(Napi::String::New(env, "protocolType"), + Napi::String::New(env, protocol_type)); - Nan::Set(groupObject, Nan::New("state").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupListing_state(group))); + (groupObject).Set(Napi::String::New(env, "state"), + Napi::Number::New(env, rd_kafka_ConsumerGroupListing_state(group))); - Nan::Set(groups, i, groupObject); + (groups).Set(i, groupObject); } - Nan::Set(returnObject, Nan::New("groups").ToLocalChecked(), groups); + (returnObject).Set(Napi::String::New(env, "groups"), groups); return returnObject; } /** * @brief Converts a rd_kafka_MemberDescription_t* into a v8 object. */ -v8::Local FromMemberDescription( +Napi::Object FromMemberDescription( const rd_kafka_MemberDescription_t* member) { /* Return object type: { - clientHost: string - clientId: string - memberId: string - memberAssignment: Buffer // will be always null - memberMetadata: Buffer // will be always null - groupInstanceId: string - assignment: { - topicPartitions: TopicPartition[] - }, + clientHost: string + clientId: string + memberId: string + memberAssignment: Buffer // will be always null + memberMetadata: Buffer // will be always null + groupInstanceId: string + assignment: { + topicPartitions: TopicPartition[] + }, } */ - v8::Local returnObject = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); // clientHost - Nan::Set(returnObject, Nan::New("clientHost").ToLocalChecked(), - Nan::New(rd_kafka_MemberDescription_host(member)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "clientHost"), + Napi::String::New(env, rd_kafka_MemberDescription_host(member)) + ); // clientId - Nan::Set(returnObject, Nan::New("clientId").ToLocalChecked(), - Nan::New(rd_kafka_MemberDescription_client_id(member)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "clientId"), + Napi::String::New(env, rd_kafka_MemberDescription_client_id(member)) + ); // memberId - Nan::Set(returnObject, Nan::New("memberId").ToLocalChecked(), - Nan::New(rd_kafka_MemberDescription_consumer_id(member)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "memberId"), + Napi::String::New(env, rd_kafka_MemberDescription_consumer_id(member)) + ); // memberAssignment - not passed to user, always null - Nan::Set(returnObject, Nan::New("memberAssignment").ToLocalChecked(), - Nan::Null()); + (returnObject).Set(Napi::String::New(env, "memberAssignment"), + env.Null()); // memberMetadata - not passed to user, always null - Nan::Set(returnObject, Nan::New("memberMetadata").ToLocalChecked(), - Nan::Null()); + (returnObject).Set(Napi::String::New(env, "memberMetadata"), + env.Null()); // groupInstanceId const char* group_instance_id = rd_kafka_MemberDescription_group_instance_id(member); if (group_instance_id) { - Nan::Set(returnObject, Nan::New("groupInstanceId").ToLocalChecked(), - Nan::New(group_instance_id).ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "groupInstanceId"), + Napi::String::New(env, group_instance_id)); } // assignment @@ -1020,13 +1018,13 @@ v8::Local FromMemberDescription( rd_kafka_MemberDescription_assignment(member); const rd_kafka_topic_partition_list_t* partitions = rd_kafka_MemberAssignment_partitions(assignment); - v8::Local topicPartitions = + Napi::Array topicPartitions = Conversion::TopicPartition::ToTopicPartitionV8Array(partitions, false); - v8::Local assignmentObject = 
Nan::New(); - Nan::Set(assignmentObject, Nan::New("topicPartitions").ToLocalChecked(), - topicPartitions); - Nan::Set(returnObject, Nan::New("assignment").ToLocalChecked(), - assignmentObject); + Napi::Object assignmentObject = Napi::Object::New(env); + (assignmentObject).Set(Napi::String::New(env, "topicPartitions"), + topicPartitions); + (returnObject).Set(Napi::String::New(env, "assignment"), + assignmentObject); return returnObject; } @@ -1034,7 +1032,7 @@ v8::Local FromMemberDescription( /** * @brief Converts a rd_kafka_ConsumerGroupDescription_t* into a v8 object. */ -v8::Local FromConsumerGroupDescription( +Napi::Object FromConsumerGroupDescription( const rd_kafka_ConsumerGroupDescription_t* desc) { /* Return object type: { @@ -1050,80 +1048,80 @@ v8::Local FromConsumerGroupDescription( authorizedOperations: AclOperationType[] - internally numbers } */ - v8::Local returnObject = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); // groupId - Nan::Set( - returnObject, Nan::New("groupId").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupDescription_group_id(desc)) - .ToLocalChecked()); + ( + returnObject).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, rd_kafka_ConsumerGroupDescription_group_id(desc)) + ); // error const rd_kafka_error_t* error = rd_kafka_ConsumerGroupDescription_error(desc); if (error) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error)); + static_cast(rd_kafka_error_code(error)); std::string msg = std::string(rd_kafka_error_string(error)); - Nan::Set(returnObject, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, msg)); + (returnObject).Set(Napi::String::New(env, "error"), + RdKafkaError(code, msg)); } // members - v8::Local members = Nan::New(); + Napi::Array members = Napi::Array::New(env); size_t member_cnt = rd_kafka_ConsumerGroupDescription_member_count(desc); for (size_t i = 0; i < member_cnt; i++) { const rd_kafka_MemberDescription_t* member = - rd_kafka_ConsumerGroupDescription_member(desc, i); - Nan::Set(members, i, FromMemberDescription(member)); + rd_kafka_ConsumerGroupDescription_member(desc, i); + (members).Set(i, FromMemberDescription(member)); } - Nan::Set(returnObject, Nan::New("members").ToLocalChecked(), members); + (returnObject).Set(Napi::String::New(env, "members"), members); // isSimpleConsumerGroup bool is_simple = rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(desc); - Nan::Set(returnObject, Nan::New("isSimpleConsumerGroup").ToLocalChecked(), - Nan::New(is_simple)); + (returnObject).Set(Napi::String::New(env, "isSimpleConsumerGroup"), + Napi::Boolean::New(env, is_simple)); // protocolType std::string protocolType = is_simple ? 
"simple" : "consumer"; - Nan::Set(returnObject, Nan::New("protocolType").ToLocalChecked(), - Nan::New(protocolType).ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "protocolType"), + Napi::String::New(env, protocolType)); // protocol - Nan::Set(returnObject, Nan::New("protocol").ToLocalChecked(), - Nan::New( - rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "protocol"), + Napi::String::New(env, + rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) + ); // partitionAssignor - Nan::Set(returnObject, Nan::New("partitionAssignor").ToLocalChecked(), - Nan::New( - rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "partitionAssignor"), + Napi::String::New(env, + rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) + ); // state - Nan::Set(returnObject, Nan::New("state").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupDescription_state(desc))); + (returnObject).Set(Napi::String::New(env, "state"), + Napi::Number::New(env, rd_kafka_ConsumerGroupDescription_state(desc))); // coordinator const rd_kafka_Node_t* coordinator = rd_kafka_ConsumerGroupDescription_coordinator(desc); if (coordinator) { - v8::Local coordinatorObject = - Conversion::Util::ToV8Object(coordinator); - Nan::Set(returnObject, Nan::New("coordinator").ToLocalChecked(), - coordinatorObject); + Napi::Object coordinatorObject = + Conversion::Util::ToV8Object(coordinator); + (returnObject).Set(Napi::String::New(env, "coordinator"), + coordinatorObject); } // authorizedOperations size_t authorized_operations_cnt; const rd_kafka_AclOperation_t* authorized_operations = rd_kafka_ConsumerGroupDescription_authorized_operations( - desc, &authorized_operations_cnt); + desc, &authorized_operations_cnt); if (authorized_operations) { - Nan::Set(returnObject, Nan::New("authorizedOperations").ToLocalChecked(), - Conversion::Util::ToV8Array(authorized_operations, - authorized_operations_cnt)); + (returnObject).Set(Napi::String::New(env, "authorizedOperations"), + Conversion::Util::ToV8Array(authorized_operations, + authorized_operations_cnt)); } return returnObject; @@ -1132,30 +1130,30 @@ v8::Local FromConsumerGroupDescription( /** * @brief Converts a rd_kafka_DescribeConsumerGroups_result_t* into a v8 object. */ -v8::Local FromDescribeConsumerGroupsResult( +Napi::Object FromDescribeConsumerGroupsResult( const rd_kafka_DescribeConsumerGroups_result_t* result) { /* Return object type: { groups: GroupDescription[] } */ - v8::Local returnObject = Nan::New(); - v8::Local groups = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); + Napi::Array groups = Napi::Array::New(env); size_t groups_cnt; const rd_kafka_ConsumerGroupDescription_t** groups_list = rd_kafka_DescribeConsumerGroups_result_groups(result, &groups_cnt); for (size_t i = 0; i < groups_cnt; i++) { const rd_kafka_ConsumerGroupDescription_t* group = groups_list[i]; - Nan::Set(groups, i, FromConsumerGroupDescription(group)); + (groups).Set(i, FromConsumerGroupDescription(group)); } - Nan::Set(returnObject, Nan::New("groups").ToLocalChecked(), groups); + (returnObject).Set(Napi::String::New(env, "groups"), groups); return returnObject; } /** * @brief Converts a rd_kafka_DeleteGroups_result_t* into a v8 array. 
*/ -v8::Local FromDeleteGroupsResult( +Napi::Array FromDeleteGroupsResult( const rd_kafka_DeleteGroups_result_t* result) { /* Return object type: [{ @@ -1164,44 +1162,44 @@ v8::Local FromDeleteGroupsResult( error?: LibrdKafkaError }] */ - v8::Local returnArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); size_t result_cnt; const rd_kafka_group_result_t** results = rd_kafka_DeleteGroups_result_groups(result, &result_cnt); for (size_t i = 0; i < result_cnt; i++) { const rd_kafka_group_result_t* group_result = results[i]; - v8::Local group_object = Nan::New(); + Napi::Object group_object = Napi::Object::New(env); - Nan::Set(group_object, Nan::New("groupId").ToLocalChecked(), - Nan::New(rd_kafka_group_result_name(group_result)) - .ToLocalChecked()); + (group_object).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, rd_kafka_group_result_name(group_result)) + ); const rd_kafka_error_t* error = rd_kafka_group_result_error(group_result); if (!error) { - Nan::Set(group_object, Nan::New("errorCode").ToLocalChecked(), - Nan::New(RD_KAFKA_RESP_ERR_NO_ERROR)); + (group_object).Set(Napi::String::New(env, "errorCode"), + Napi::Number::New(env, RD_KAFKA_RESP_ERR_NO_ERROR)); } else { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error)); + static_cast(rd_kafka_error_code(error)); const char* msg = rd_kafka_error_string(error); - Nan::Set(group_object, Nan::New("errorCode").ToLocalChecked(), - Nan::New(code)); - Nan::Set(group_object, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, msg)); + (group_object).Set(Napi::String::New(env, "errorCode"), + Napi::Number::New(env, code)); + (group_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, msg)); } - Nan::Set(returnArray, i, group_object); + (returnArray).Set(i, group_object); } return returnArray; } /** - * @brief Converts a rd_kafka_ListConsumerGroupOffsets_result_t* + * @brief Converts a rd_kafka_ListConsumerGroupOffsets_result_t* * into a v8 Array. 
*/ -v8::Local FromListConsumerGroupOffsetsResult( +Napi::Array FromListConsumerGroupOffsetsResult( const rd_kafka_ListConsumerGroupOffsets_result_t* result) { /* Return Object type: GroupResults[] = [{ @@ -1221,7 +1219,7 @@ v8::Local FromListConsumerGroupOffsetsResult( } */ - v8::Local returnArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); size_t result_cnt; const rd_kafka_group_result_t** res = rd_kafka_ListConsumerGroupOffsets_result_groups(result, &result_cnt); @@ -1230,80 +1228,80 @@ v8::Local FromListConsumerGroupOffsetsResult( const rd_kafka_group_result_t* group_result = res[i]; // Create group result object - v8::Local group_object = Nan::New(); + Napi::Object group_object = Napi::Object::New(env); // Set groupId std::string groupId = rd_kafka_group_result_name(group_result); - Nan::Set(group_object, Nan::New("groupId").ToLocalChecked(), - Nan::New(groupId.c_str()).ToLocalChecked()); + (group_object).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, groupId.c_str())); // Set group-level error (if any) const rd_kafka_error_t* group_error = - rd_kafka_group_result_error(group_result); + rd_kafka_group_result_error(group_result); if (group_error) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(group_error)); + static_cast(rd_kafka_error_code(group_error)); const char* msg = rd_kafka_error_string(group_error); - Nan::Set(group_object, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, msg)); + (group_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, msg)); } // Get the list of partitions for this group const rd_kafka_topic_partition_list_t* partitionList = - rd_kafka_group_result_partitions(group_result); + rd_kafka_group_result_partitions(group_result); // Prepare array for TopicPartitionOffset[] - v8::Local partitionsArray = Nan::New(); + Napi::Array partitionsArray = Napi::Array::New(env); int partitionIndex = 0; for (int j = 0; j < partitionList->cnt; j++) { const rd_kafka_topic_partition_t* partition = &partitionList->elems[j]; // Create the TopicPartitionOffset object - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); // Set topic, partition, and offset - Nan::Set(partition_object, Nan::New("topic").ToLocalChecked(), - Nan::New(partition->topic).ToLocalChecked()); - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New(partition->partition)); - Nan::Set(partition_object, Nan::New("offset").ToLocalChecked(), - Nan::New(partition->offset)); + (partition_object).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, partition->topic)); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, partition->partition)); + (partition_object).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, partition->offset)); // Set metadata (if available) if (partition->metadata != nullptr) { - Nan::Set( - partition_object, Nan::New("metadata").ToLocalChecked(), - Nan::New(static_cast(partition->metadata)) - .ToLocalChecked()); + ( + partition_object).Set(Napi::String::New(env, "metadata"), + Napi::String::New(env, static_cast(partition->metadata)) + ); } else { - Nan::Set(partition_object, Nan::New("metadata").ToLocalChecked(), - Nan::Null()); + (partition_object).Set(Napi::String::New(env, "metadata"), + env.Null()); } // Set leaderEpoch (if available) int32_t leader_epoch = - rd_kafka_topic_partition_get_leader_epoch(partition); + rd_kafka_topic_partition_get_leader_epoch(partition); if (leader_epoch >= 0) 
{ - Nan::Set(partition_object, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (partition_object).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } // Set partition-level error (if any) if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { - RdKafka::ErrorCode code = - static_cast(partition->err); - Nan::Set(group_object, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, rd_kafka_err2str(partition->err))); + RdKafka::ErrorCode code = + static_cast(partition->err); + (group_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, rd_kafka_err2str(partition->err))); } - Nan::Set(partitionsArray, partitionIndex++, partition_object); + (partitionsArray).Set(partitionIndex++, partition_object); } - Nan::Set(group_object, Nan::New("partitions").ToLocalChecked(), - partitionsArray); + (group_object).Set(Napi::String::New(env, "partitions"), + partitionsArray); - Nan::Set(returnArray, i, group_object); + (returnArray).Set(i, group_object); } return returnArray; @@ -1312,7 +1310,7 @@ v8::Local FromListConsumerGroupOffsetsResult( /** * @brief Converts a rd_kafka_DeleteRecords_result_t* into a v8 Array. */ -v8::Local FromDeleteRecordsResult( +Napi::Array FromDeleteRecordsResult( const rd_kafka_DeleteRecords_result_t* result) { /* Return object type: [{ @@ -1325,30 +1323,30 @@ v8::Local FromDeleteRecordsResult( const rd_kafka_topic_partition_list_t* partitionList = rd_kafka_DeleteRecords_result_offsets(result); - v8::Local partitionsArray = Nan::New(); + Napi::Array partitionsArray = Napi::Array::New(env); int partitionIndex = 0; for (int j = 0; j < partitionList->cnt; j++) { const rd_kafka_topic_partition_t* partition = &partitionList->elems[j]; // Create the TopicPartitionOffset object - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); // Set topic, partition, and offset and error(if required) - Nan::Set(partition_object, Nan::New("topic").ToLocalChecked(), - Nan::New(partition->topic).ToLocalChecked()); - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New(partition->partition)); - Nan::Set(partition_object, Nan::New("lowWatermark").ToLocalChecked(), - Nan::New(partition->offset)); + (partition_object).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, partition->topic)); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, partition->partition)); + (partition_object).Set(Napi::String::New(env, "lowWatermark"), + Napi::Number::New(env, partition->offset)); if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); - Nan::Set(partition_object, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, rd_kafka_err2str(partition->err))); + (partition_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, rd_kafka_err2str(partition->err))); } - Nan::Set(partitionsArray, partitionIndex++, partition_object); + (partitionsArray).Set(partitionIndex++, partition_object); } return partitionsArray; @@ -1357,7 +1355,7 @@ v8::Local FromDeleteRecordsResult( /** * @brief Converts a rd_kafka_DescribeTopics_result_t* into a v8 Array. 
*/ -v8::Local FromDescribeTopicsResult( +Napi::Array FromDescribeTopicsResult( const rd_kafka_DescribeTopics_result_t* result) { /* Return object type: [{ @@ -1385,7 +1383,7 @@ v8::Local FromDescribeTopicsResult( } */ - v8::Local returnArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); size_t result_cnt; const rd_kafka_TopicDescription_t** results = rd_kafka_DescribeTopics_result_topics(result, &result_cnt); @@ -1393,80 +1391,80 @@ v8::Local FromDescribeTopicsResult( int topicIndex = 0; for (size_t i = 0; i < result_cnt; i++) { - v8::Local topic_object = Nan::New(); + Napi::Object topic_object = Napi::Object::New(env); const char* topic_name = rd_kafka_TopicDescription_name(results[i]); - Nan::Set(topic_object, Nan::New("name").ToLocalChecked(), - Nan::New(topic_name).ToLocalChecked()); + (topic_object).Set(Napi::String::New(env, "name"), + Napi::String::New(env, topic_name)); const rd_kafka_Uuid_t* topic_id = - rd_kafka_TopicDescription_topic_id(results[i]); - Nan::Set(topic_object, Nan::New("topicId").ToLocalChecked(), - Conversion::Util::UuidToV8Object(topic_id)); + rd_kafka_TopicDescription_topic_id(results[i]); + (topic_object).Set(Napi::String::New(env, "topicId"), + Conversion::Util::UuidToV8Object(topic_id)); int is_internal = rd_kafka_TopicDescription_is_internal(results[i]); - Nan::Set(topic_object, Nan::New("isInternal").ToLocalChecked(), - Nan::New(is_internal)); + (topic_object).Set(Napi::String::New(env, "isInternal"), + Napi::Boolean::New(env, is_internal)); const rd_kafka_error_t* error = rd_kafka_TopicDescription_error(results[i]); if (error) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error)); - Nan::Set(topic_object, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, rd_kafka_error_string(error))); + static_cast(rd_kafka_error_code(error)); + (topic_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, rd_kafka_error_string(error))); } size_t authorized_operations_cnt; const rd_kafka_AclOperation_t* authorized_operations = - rd_kafka_TopicDescription_authorized_operations( - results[i], &authorized_operations_cnt); + rd_kafka_TopicDescription_authorized_operations( + results[i], &authorized_operations_cnt); if (authorized_operations) { - Nan::Set(topic_object, Nan::New("authorizedOperations").ToLocalChecked(), - Conversion::Util::ToV8Array(authorized_operations, - authorized_operations_cnt)); + (topic_object).Set(Napi::String::New(env, "authorizedOperations"), + Conversion::Util::ToV8Array(authorized_operations, + authorized_operations_cnt)); } size_t partition_cnt; const rd_kafka_TopicPartitionInfo_t** partitions = - rd_kafka_TopicDescription_partitions(results[i], &partition_cnt); - v8::Local partitionsArray = Nan::New(); + rd_kafka_TopicDescription_partitions(results[i], &partition_cnt); + Napi::Array partitionsArray = Napi::Array::New(env); for (size_t j = 0; j < partition_cnt; j++) { - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); const rd_kafka_TopicPartitionInfo_t* partition = partitions[j]; - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New( - rd_kafka_TopicPartitionInfo_partition(partition))); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, + rd_kafka_TopicPartitionInfo_partition(partition))); const rd_kafka_Node_t* leader = - rd_kafka_TopicPartitionInfo_leader(partition); - Nan::Set(partition_object, Nan::New("leader").ToLocalChecked(), - Conversion::Util::ToV8Object(leader)); + 
rd_kafka_TopicPartitionInfo_leader(partition); + (partition_object).Set(Napi::String::New(env, "leader"), + Conversion::Util::ToV8Object(leader)); size_t isr_cnt; const rd_kafka_Node_t** isr = - rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt); - v8::Local isrArray = Nan::New(); + rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt); + Napi::Array isrArray = Napi::Array::New(env); for (size_t k = 0; k < isr_cnt; k++) { - Nan::Set(isrArray, k, Conversion::Util::ToV8Object(isr[k])); + (isrArray).Set(k, Conversion::Util::ToV8Object(isr[k])); } - Nan::Set(partition_object, Nan::New("isr").ToLocalChecked(), isrArray); + (partition_object).Set(Napi::String::New(env, "isr"), isrArray); size_t replicas_cnt; const rd_kafka_Node_t** replicas = - rd_kafka_TopicPartitionInfo_replicas(partition, &replicas_cnt); - v8::Local replicasArray = Nan::New(); + rd_kafka_TopicPartitionInfo_replicas(partition, &replicas_cnt); + Napi::Array replicasArray = Napi::Array::New(env); for (size_t k = 0; k < replicas_cnt; k++) { - Nan::Set(replicasArray, k, Conversion::Util::ToV8Object(replicas[k])); + (replicasArray).Set(k, Conversion::Util::ToV8Object(replicas[k])); } - Nan::Set(partition_object, Nan::New("replicas").ToLocalChecked(), - replicasArray); + (partition_object).Set(Napi::String::New(env, "replicas"), + replicasArray); - Nan::Set(partitionsArray, j, partition_object); + (partitionsArray).Set(j, partition_object); } - Nan::Set(topic_object, Nan::New("partitions").ToLocalChecked(), - partitionsArray); + (topic_object).Set(Napi::String::New(env, "partitions"), + partitionsArray); - Nan::Set(returnArray, topicIndex++, topic_object); + (returnArray).Set(topicIndex++, topic_object); } return returnArray; @@ -1475,7 +1473,7 @@ v8::Local FromDescribeTopicsResult( /** * @brief Converts a rd_kafka_ListOffsets_result_t* into a v8 Array. 
*/ -v8::Local FromListOffsetsResult( +Napi::Array FromListOffsetsResult( const rd_kafka_ListOffsets_result_t* result) { /* Return object type: [{ @@ -1491,40 +1489,40 @@ v8::Local FromListOffsetsResult( const rd_kafka_ListOffsetsResultInfo_t** results = rd_kafka_ListOffsets_result_infos(result, &result_cnt); - v8::Local resultArray = Nan::New(); + Napi::Array resultArray = Napi::Array::New(env); int partitionIndex = 0; for (i = 0; i < result_cnt; i++) { const rd_kafka_topic_partition_t* partition = - rd_kafka_ListOffsetsResultInfo_topic_partition(results[i]); + rd_kafka_ListOffsetsResultInfo_topic_partition(results[i]); int64_t timestamp = rd_kafka_ListOffsetsResultInfo_timestamp(results[i]); // Create the ListOffsetsResult object - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); // Set topic, partition, offset, error and timestamp - Nan::Set(partition_object, Nan::New("topic").ToLocalChecked(), - Nan::New(partition->topic).ToLocalChecked()); - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New(partition->partition)); - Nan::Set(partition_object, Nan::New("offset").ToLocalChecked(), - Nan::New(partition->offset)); + (partition_object).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, partition->topic)); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, partition->partition)); + (partition_object).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, partition->offset)); if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); - Nan::Set(partition_object, Nan::New("error").ToLocalChecked(), - RdKafkaError(code, rd_kafka_err2str(partition->err))); + (partition_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, rd_kafka_err2str(partition->err))); } // Set leaderEpoch (if available) int32_t leader_epoch = - rd_kafka_topic_partition_get_leader_epoch(partition); + rd_kafka_topic_partition_get_leader_epoch(partition); if (leader_epoch >= 0) { - Nan::Set(partition_object, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (partition_object).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } - Nan::Set(partition_object, Nan::New("timestamp").ToLocalChecked(), - Nan::New(timestamp)); + (partition_object).Set(Napi::String::New(env, "timestamp"), + Napi::Number::New(env, timestamp)); - Nan::Set(resultArray, partitionIndex++, partition_object); + (resultArray).Set(partitionIndex++, partition_object); } return resultArray; @@ -1535,8 +1533,8 @@ v8::Local FromListOffsetsResult( } // namespace Conversion namespace Util { - std::string FromV8String(v8::Local val) { - Nan::Utf8String keyUTF8(val); + std::string FromV8String(Napi::String val) { + std::string keyUTF8 = val.As(); return std::string(*keyUTF8); } } // Namespace Util diff --git a/src/common.h b/src/common.h index 121a5cda..4d500003 100644 --- a/src/common.h +++ b/src/common.h @@ -11,7 +11,8 @@ #ifndef SRC_COMMON_H_ #define SRC_COMMON_H_ -#include +#include +#include #include #include @@ -21,8 +22,6 @@ #include "rdkafkacpp.h" // NOLINT #include "rdkafka.h" // NOLINT -#include "src/errors.h" - typedef std::vector BrokerMetadataList; typedef std::vector PartitionMetadataList; typedef std::vector TopicMetadataList; @@ -31,16 +30,16 @@ namespace NodeKafka { void Log(std::string); -template T GetParameter(v8::Local, std::string, T); +template T GetParameter(Napi::Object, std::string, T); template<> 
 std::string GetParameter<std::string>(
-    v8::Local<v8::Object>, std::string, std::string);
+    Napi::Object, std::string, std::string);
 template<>
 std::vector<std::string> GetParameter<std::vector<std::string> >(
-    v8::Local<v8::Object>, std::string, std::vector<std::string>);
-template<> v8::Local<v8::Array> GetParameter<v8::Local<v8::Array> >(
-    v8::Local<v8::Object>, std::string, v8::Local<v8::Array>);
+    Napi::Object, std::string, std::vector<std::string>);
+template<> Napi::Array GetParameter<Napi::Array>(
+    Napi::Object, std::string, Napi::Array);
 // template int GetParameter<int>(v8::Local<v8::Object>, std::string, int);
-std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array>);
-std::list<std::string> v8ArrayToStringList(v8::Local<v8::Array>);
+std::vector<std::string> v8ArrayToStringVector(Napi::Array);
+std::list<std::string> v8ArrayToStringList(Napi::Array);

 class scoped_mutex_lock {
  public:
@@ -96,91 +95,91 @@ class scoped_shared_read_lock {
 namespace Conversion {

 namespace Util {
-std::vector<std::string> ToStringVector(v8::Local<v8::Array>);
-v8::Local<v8::Array> ToV8Array(std::vector<std::string>);
-v8::Local<v8::Array> ToV8Array(const rd_kafka_error_t **error_list,
+std::vector<std::string> ToStringVector(Napi::Array);
+Napi::Array ToV8Array(std::vector<std::string>);
+Napi::Array ToV8Array(const rd_kafka_error_t **error_list,
                        size_t error_cnt);
-v8::Local<v8::Object> UuidToV8Object(const rd_kafka_Uuid_t* uuid);
-v8::Local<v8::Array> ToV8Array(const rd_kafka_AclOperation_t *, size_t);
+Napi::Object UuidToV8Object(const rd_kafka_Uuid_t* uuid);
+Napi::Array ToV8Array(const rd_kafka_AclOperation_t *, size_t);

-v8::Local<v8::Object> ToV8Object(const rd_kafka_Node_t *);
+Napi::Object ToV8Object(const rd_kafka_Node_t *);
 } // namespace Util

 namespace Admin {
 // Topics from topic object, or topic object array
-rd_kafka_NewTopic_t *FromV8TopicObject(v8::Local<v8::Object>,
+rd_kafka_NewTopic_t *FromV8TopicObject(Napi::Object,
                                        std::string &errstr);
-rd_kafka_NewTopic_t **FromV8TopicObjectArray(v8::Local<v8::Array>);
+rd_kafka_NewTopic_t **FromV8TopicObjectArray(Napi::Array);

 // ListGroups: request
 std::vector<rd_kafka_consumer_group_state_t> FromV8GroupStateArray(
-    v8::Local<v8::Array>);
+    Napi::Array);

 // ListGroups: response
-v8::Local<v8::Object> FromListConsumerGroupsResult(
+Napi::Object FromListConsumerGroupsResult(
    const rd_kafka_ListConsumerGroups_result_t *);

 // DescribeGroups: response
-v8::Local<v8::Object> FromMemberDescription(
+Napi::Object FromMemberDescription(
    const rd_kafka_MemberDescription_t *member);
-v8::Local<v8::Object> FromConsumerGroupDescription(
+Napi::Object FromConsumerGroupDescription(
    const rd_kafka_ConsumerGroupDescription_t *desc);
-v8::Local<v8::Object> FromDescribeConsumerGroupsResult(
+Napi::Object FromDescribeConsumerGroupsResult(
    const rd_kafka_DescribeConsumerGroups_result_t *);

 // DeleteGroups: Response
-v8::Local<v8::Array> FromDeleteGroupsResult(
+Napi::Array FromDeleteGroupsResult(
    const rd_kafka_DeleteGroups_result_t *);

 // ListConsumerGroupOffsets: Response
-v8::Local<v8::Array> FromListConsumerGroupOffsetsResult(
+Napi::Array FromListConsumerGroupOffsetsResult(
    const rd_kafka_ListConsumerGroupOffsets_result_t *result);

 // DeleteRecords: Response
-v8::Local<v8::Array> FromDeleteRecordsResult(
+Napi::Array FromDeleteRecordsResult(
    const rd_kafka_DeleteRecords_result_t* result);

 // DescribeTopics: Response
-v8::Local<v8::Array> FromDescribeTopicsResult(
+Napi::Array FromDescribeTopicsResult(
    const rd_kafka_DescribeTopics_result_t* result);

 // ListOffsets: Response
-v8::Local<v8::Array> FromListOffsetsResult(
+Napi::Array FromListOffsetsResult(
    const rd_kafka_ListOffsets_result_t* result);
 } // namespace Admin

 namespace TopicPartition {

-v8::Local<v8::Array> ToV8Array(std::vector<RdKafka::TopicPartition*> &);
-v8::Local<v8::Array> ToTopicPartitionV8Array(
+Napi::Array ToV8Array(std::vector<RdKafka::TopicPartition*> &);
+Napi::Array ToTopicPartitionV8Array(
    const rd_kafka_topic_partition_list_t *, bool include_offset);
-RdKafka::TopicPartition *FromV8Object(v8::Local<v8::Object>);
-std::vector<RdKafka::TopicPartition*> FromV8Array(const v8::Local<v8::Array> &); // NOLINT
+RdKafka::TopicPartition *FromV8Object(Napi::Object);
+std::vector FromV8Array(const Napi::Array &); // NOLINT rd_kafka_topic_partition_list_t *TopicPartitionv8ArrayToTopicPartitionList( - v8::Local parameter, bool include_offset); + Napi::Array parameter, bool include_offset); rd_kafka_topic_partition_list_t * TopicPartitionOffsetSpecv8ArrayToTopicPartitionList( - v8::Local parameter); + Napi::Array parameter); } // namespace TopicPartition namespace Metadata { -v8::Local ToV8Object(RdKafka::Metadata*); +Napi::Object ToV8Object(RdKafka::Metadata*); } // namespace Metadata namespace Message { -v8::Local ToV8Object(RdKafka::Message*); -v8::Local ToV8Object(RdKafka::Message*, bool, bool); +Napi::Object ToV8Object(RdKafka::Message*); +Napi::Object ToV8Object(RdKafka::Message*, bool, bool); } } // namespace Conversion namespace Util { - std::string FromV8String(v8::Local); + std::string FromV8String(Napi::String); } } // namespace NodeKafka diff --git a/src/config.cc b/src/config.cc index 5f66b2d8..d3411751 100644 --- a/src/config.cc +++ b/src/config.cc @@ -13,11 +13,10 @@ #include #include -using Nan::MaybeLocal; -using Nan::Maybe; -using v8::Local; -using v8::String; -using v8::Object; +using Napi::MaybeLocal; +using Napi::Maybe; +using Napi::String; +using Napi::Object; using std::cout; using std::endl; @@ -34,23 +33,23 @@ void Conf::DumpConfig(std::list *dump) { std::cout << std::endl; } -Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local object, std::string &errstr) { // NOLINT - v8::Local context = Nan::GetCurrentContext(); +Conf * Conf::create(RdKafka::Conf::ConfType type, Napi::Object object, std::string &errstr) { // NOLINT + v8::Local context = Napi::GetCurrentContext(); Conf* rdconf = static_cast(RdKafka::Conf::create(type)); v8::MaybeLocal _property_names = object->GetOwnPropertyNames( - Nan::GetCurrentContext()); - v8::Local property_names = _property_names.ToLocalChecked(); + Napi::GetCurrentContext()); + Napi::Array property_names = _property_names; for (unsigned int i = 0; i < property_names->Length(); ++i) { std::string string_value; std::string string_key; - v8::Local key = Nan::Get(property_names, i).ToLocalChecked(); - v8::Local value = Nan::Get(object, key).ToLocalChecked(); + Napi::Value key = (property_names).Get(i); + Napi::Value value = (object).Get(key); - if (key->IsString()) { - Nan::Utf8String utf8_key(key); + if (key.IsString()) { + std::string utf8_key = key.As(); string_key = std::string(*utf8_key); } else { continue; @@ -58,21 +57,21 @@ Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local object, if (!value->IsFunction()) { #if NODE_MAJOR_VERSION > 6 - if (value->IsInt32()) { + if (value.IsNumber()) { string_value = std::to_string( value->Int32Value(context).ToChecked()); } else if (value->IsUint32()) { string_value = std::to_string( value->Uint32Value(context).ToChecked()); } else if (value->IsBoolean()) { - const bool v = Nan::To(value).ToChecked(); + const bool v = value.As().Value().ToChecked(); string_value = v ? 
"true" : "false"; } else { - Nan::Utf8String utf8_value(value.As()); + std::string utf8_value = value.As(.As()); string_value = std::string(*utf8_value); } #else - Nan::Utf8String utf8_value(value.As()); + std::string utf8_value = value.As(.As()); string_value = std::string(*utf8_value); #endif if (rdconf->set(string_key, string_value, errstr) @@ -91,7 +90,7 @@ Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local object, void Conf::ConfigureCallback( const std::string &string_key, - const v8::Local &cb, + const Napi::Function &cb, bool add, std::string &errstr) { if (string_key.compare("rebalance_cb") == 0) { NodeKafka::Callbacks::Rebalance *rebalance = rebalance_cb(); diff --git a/src/config.h b/src/config.h index d7a5a786..7d41cc75 100644 --- a/src/config.h +++ b/src/config.h @@ -10,7 +10,8 @@ #ifndef SRC_CONFIG_H_ #define SRC_CONFIG_H_ -#include +#include +#include #include #include #include @@ -26,7 +27,7 @@ class Conf : public RdKafka::Conf { public: ~Conf(); - static Conf* create(RdKafka::Conf::ConfType, v8::Local, std::string &); // NOLINT + static Conf* create(RdKafka::Conf::ConfType, Napi::Object, std::string &); // NOLINT static void DumpConfig(std::list *); void listen(); @@ -34,7 +35,7 @@ class Conf : public RdKafka::Conf { void ConfigureCallback( const std::string &string_key, - const v8::Local &cb, + const Napi::Function &cb, bool add, std::string &errstr); bool is_sasl_oauthbearer() const; diff --git a/src/connection.cc b/src/connection.cc index 189c10f1..e69de29b 100644 --- a/src/connection.cc +++ b/src/connection.cc @@ -1,678 +0,0 @@ -/* - * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016-2023 Blizzard Entertainment - * (c) 2023 Confluent, Inc. - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. - */ -#include "src/connection.h" - -#include -#include -#include - -#include "src/workers.h" - -using RdKafka::Conf; - -namespace NodeKafka { - -/** - * @brief Connection v8 wrapped object. - * - * Wraps the RdKafka::Handle object with compositional inheritence and - * provides sensible defaults for exposing callbacks to node - * - * This object can't itself expose methods to the prototype directly, as far - * as I can tell. But it can provide the NAN_METHODS that just need to be added - * to the prototype. Since connections, etc. are managed differently based on - * whether it is a producer or consumer, they manage that. This base class - * handles some of the wrapping functionality and more importantly, the - * configuration of callbacks - * - * Any callback available to both consumers and producers, like logging or - * events will be handled in here. - * - * @sa RdKafka::Handle - * @sa NodeKafka::Client - */ - -Connection::Connection(Conf* gconfig, Conf* tconfig): - m_event_cb(), - m_gconfig(gconfig), - m_tconfig(tconfig) { - std::string errstr; - - m_client = NULL; - m_is_closing = false; - uv_rwlock_init(&m_connection_lock); - - // Try to set the event cb. Shouldn't be an error here, but if there - // is, it doesn't get reported. - // - // Perhaps node new methods should report this as an error? But there - // isn't anything the user can do about it. - m_gconfig->set("event_cb", &m_event_cb, errstr); - } - -/* Use an existing Connection object as the underlying for this object. - * At this point, the underlying connection is assumed to be connected with - * the m_client set. 
*/ -Connection::Connection(Connection *existing): - m_event_cb() { - m_client = existing->m_client; - - m_gconfig = existing->m_gconfig; - m_tconfig = existing->m_tconfig; - - m_is_closing = false; - m_has_underlying = true; - - // We must share the same connection lock as the existing connection to - // avoid getting disconnected while the existing connection is still in use. - m_connection_lock = existing->m_connection_lock; - } - - -Connection::~Connection() { - // The underlying connection will take care of cleanup. - if (m_has_underlying) { - return; - } - - uv_rwlock_destroy(&m_connection_lock); - if (m_tconfig) { - delete m_tconfig; - } - - if (m_gconfig) { - delete m_gconfig; - } -} - -Baton Connection::rdkafkaErrorToBaton(RdKafka::Error* error) { - if (NULL == error) { - return Baton(RdKafka::ERR_NO_ERROR); - } else { - Baton result(error->code(), error->str(), error->is_fatal(), - error->is_retriable(), error->txn_requires_abort()); - delete error; - return result; - } -} - -// If OAUTHBEARER authentication is set up, then push the callbacks onto the -// SASL queue so we don't need to keep polling. This method should be called -// before the client is created. -Baton Connection::setupSaslOAuthBearerConfig() { - if (!m_gconfig->is_sasl_oauthbearer()) { - return Baton(RdKafka::ERR_NO_ERROR); - } - - std::string errstr; - if (m_gconfig->enable_sasl_queue(true, errstr) != RdKafka::Conf::CONF_OK) { - return Baton(RdKafka::ERR__STATE, errstr); - } - - return Baton(RdKafka::ERR_NO_ERROR); -} - -// If OAUTHBEARER authentication is set up, then handle the callbacks on -// the background thread. This method should be called after the client is -// created and only if `setupSaslOAuthBearerConfig` is called earlier. -Baton Connection::setupSaslOAuthBearerBackgroundQueue() { - if (!m_gconfig->is_sasl_oauthbearer()) { - return Baton(RdKafka::ERR_NO_ERROR); - } - - RdKafka::Error* error = m_client->sasl_background_callbacks_enable(); - return rdkafkaErrorToBaton(error); -} - -RdKafka::TopicPartition* Connection::GetPartition(std::string &topic) { - return RdKafka::TopicPartition::create(topic, RdKafka::Topic::PARTITION_UA); -} - -RdKafka::TopicPartition* Connection::GetPartition(std::string &topic, int partition) { // NOLINT - return RdKafka::TopicPartition::create(topic, partition); -} - -bool Connection::IsConnected() const { - return !m_is_closing && m_client != NULL; -} - -bool Connection::IsClosing() const { - return m_client != NULL && m_is_closing; -} - -RdKafka::Handle* Connection::GetClient() { - return m_client; -} - -std::string Connection::Name() const { - if (!IsConnected()) { - return std::string(""); - } - return std::string(m_client->name()); -} - -Baton Connection::CreateTopic(std::string topic_name) { - return CreateTopic(topic_name, NULL); -} - -Baton Connection::CreateTopic(std::string topic_name, RdKafka::Conf* conf) { - std::string errstr; - - RdKafka::Topic* topic = NULL; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - topic = RdKafka::Topic::create(m_client, topic_name, conf, errstr); - } else { - return Baton(RdKafka::ErrorCode::ERR__STATE); - } - } else { - return Baton(RdKafka::ErrorCode::ERR__STATE); - } - - if (!errstr.empty()) { - return Baton(RdKafka::ErrorCode::ERR_TOPIC_EXCEPTION, errstr); - } - - // Maybe do it this way later? 
Then we don't need to do static_cast - // - return Baton(topic); -} - -Baton Connection::QueryWatermarkOffsets( - std::string topic_name, int32_t partition, - int64_t* low_offset, int64_t* high_offset, - int timeout_ms) { - // Check if we are connected first - - RdKafka::ErrorCode err; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - err = m_client->query_watermark_offsets(topic_name, partition, - low_offset, high_offset, timeout_ms); - - } else { - err = RdKafka::ERR__STATE; - } - } else { - err = RdKafka::ERR__STATE; - } - - return Baton(err); -} - -/** - * Look up the offsets for the given partitions by timestamp. - * - * The returned offset for each partition is the earliest offset whose - * timestamp is greater than or equal to the given timestamp in the - * corresponding partition. - * - * @returns A baton specifying the error state. If there was no error, - * there still may be an error on a topic partition basis. - */ -Baton Connection::OffsetsForTimes( - std::vector &toppars, - int timeout_ms) { - // Check if we are connected first - - RdKafka::ErrorCode err; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - err = m_client->offsetsForTimes(toppars, timeout_ms); - - } else { - err = RdKafka::ERR__STATE; - } - } else { - err = RdKafka::ERR__STATE; - } - - return Baton(err); -} - -Baton Connection::GetMetadata( - bool all_topics, std::string topic_name, int timeout_ms) { - RdKafka::Topic* topic = NULL; - RdKafka::ErrorCode err; - - std::string errstr; - - if (!topic_name.empty()) { - Baton b = CreateTopic(topic_name); - if (b.err() == RdKafka::ErrorCode::ERR_NO_ERROR) { - topic = b.data(); - } - } - - RdKafka::Metadata* metadata = NULL; - - if (!errstr.empty()) { - return Baton(RdKafka::ERR_TOPIC_EXCEPTION); - } - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - err = m_client->metadata(all_topics, topic, &metadata, timeout_ms); - } else { - err = RdKafka::ERR__STATE; - } - } else { - err = RdKafka::ERR__STATE; - } - - if (topic != NULL) - delete topic; - - if (err == RdKafka::ERR_NO_ERROR) { - return Baton(metadata); - } else { - // metadata is not set here - // @see https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860 // NOLINT - return Baton(err); - } -} - -Baton Connection::SetSaslCredentials( - std::string username, std::string password) { - RdKafka::Error *error; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - error = m_client->sasl_set_credentials(username, password); - } else { - return Baton(RdKafka::ERR__STATE); - } - } else { - return Baton(RdKafka::ERR__STATE); - } - - return rdkafkaErrorToBaton(error); -} - -Baton Connection::SetOAuthBearerToken( - const std::string& value, int64_t lifetime_ms, - const std::string& principal_name, - const std::list& extensions) { - RdKafka::ErrorCode error_code; - std::string errstr; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - error_code = m_client->oauthbearer_set_token( - value, lifetime_ms, principal_name, extensions, errstr); - } else { - return Baton(RdKafka::ERR__STATE); - } - } else { - return Baton(RdKafka::ERR__STATE); - } - - if (error_code != RdKafka::ERR_NO_ERROR) { - return Baton(error_code, errstr); - } - - return Baton(error_code); 
-} - -Baton Connection::SetOAuthBearerTokenFailure(const std::string& errstr) { - RdKafka::ErrorCode error_code; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - error_code = m_client->oauthbearer_set_token_failure(errstr); - } else { - return Baton(RdKafka::ERR__STATE); - } - } else { - return Baton(RdKafka::ERR__STATE); - } - - return Baton(error_code); -} - -void Connection::ConfigureCallback( - const std::string &string_key, const v8::Local &cb, bool add) { - if (string_key.compare("event_cb") == 0) { - if (add) { - this->m_event_cb.dispatcher.AddCallback(cb); - } else { - this->m_event_cb.dispatcher.RemoveCallback(cb); - } - } -} - -// NAN METHODS - -NAN_METHOD(Connection::NodeGetMetadata) { - Nan::HandleScope scope; - - Connection* obj = ObjectWrap::Unwrap(info.This()); - - v8::Local config; - if (info[0]->IsObject()) { - config = info[0].As(); - } else { - config = Nan::New(); - } - - if (!info[1]->IsFunction()) { - Nan::ThrowError("Second parameter must be a callback"); - return; - } - - v8::Local cb = info[1].As(); - - std::string topic = GetParameter(config, "topic", ""); - bool allTopics = GetParameter(config, "allTopics", true); - int timeout_ms = GetParameter(config, "timeout", 30000); - - Nan::Callback *callback = new Nan::Callback(cb); - - Nan::AsyncQueueWorker(new Workers::ConnectionMetadata( - callback, obj, topic, timeout_ms, allTopics)); - - info.GetReturnValue().Set(Nan::Null()); -} - -NAN_METHOD(Connection::NodeOffsetsForTimes) { - Nan::HandleScope scope; - - if (info.Length() < 3 || !info[0]->IsArray()) { - // Just throw an exception - return Nan::ThrowError("Need to specify an array of topic partitions"); - } - - std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); - - int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[1].As()); - - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); - } - - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); - - Connection* handle = ObjectWrap::Unwrap(info.This()); - - Nan::AsyncQueueWorker( - new Workers::Handle::OffsetsForTimes(callback, handle, - toppars, timeout_ms)); - - info.GetReturnValue().Set(Nan::Null()); -} - -NAN_METHOD(Connection::NodeQueryWatermarkOffsets) { - Nan::HandleScope scope; - - Connection* obj = ObjectWrap::Unwrap(info.This()); - - if (!info[0]->IsString()) { - Nan::ThrowError("1st parameter must be a topic string");; - return; - } - - if (!info[1]->IsNumber()) { - Nan::ThrowError("2nd parameter must be a partition number"); - return; - } - - if (!info[2]->IsNumber()) { - Nan::ThrowError("3rd parameter must be a number of milliseconds"); - return; - } - - if (!info[3]->IsFunction()) { - Nan::ThrowError("4th parameter must be a callback"); - return; - } - - // Get string pointer for the topic name - Nan::Utf8String topicUTF8(Nan::To(info[0]).ToLocalChecked()); - // The first parameter is the topic - std::string topic_name(*topicUTF8); - - // Second parameter is the partition - int32_t partition = Nan::To(info[1]).FromJust(); - - // Third parameter is the timeout - int timeout_ms = Nan::To(info[2]).FromJust(); - - // Fourth parameter is the callback - v8::Local cb = info[3].As(); - Nan::Callback *callback = new Nan::Callback(cb); - - Nan::AsyncQueueWorker(new Workers::ConnectionQueryWatermarkOffsets( - callback, obj, topic_name, partition, timeout_ms)); - - info.GetReturnValue().Set(Nan::Null()); -} - 
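A note on the pattern in this file: every NAN_METHOD deleted from connection.cc reappears as an instance method on the templated Connection class in src/connection.h later in this patch. The translation is mechanical; as an illustrative sketch (not patch text), the simplest of these methods, NodeName, comes back roughly as:

Napi::Value NodeName(const Napi::CallbackInfo& info) {
  // Under Napi::ObjectWrap, `this` is already the wrapped C++ object, so the
  // explicit ObjectWrap::Unwrap<Connection>(info.This()) call disappears, and
  // the JS value is returned directly instead of via info.GetReturnValue().
  return Napi::String::New(info.Env(), this->Name());
}
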
-NAN_METHOD(Connection::NodeSetSaslCredentials) { - if (!info[0]->IsString()) { - Nan::ThrowError("1st parameter must be a username string"); - return; - } - - if (!info[1]->IsString()) { - Nan::ThrowError("2nd parameter must be a password string"); - return; - } - - // Get string pointer for the username - Nan::Utf8String usernameUTF8(Nan::To(info[0]).ToLocalChecked()); - // The first parameter is the username - std::string username(*usernameUTF8); - - // Get string pointer for the password - Nan::Utf8String passwordUTF8(Nan::To(info[1]).ToLocalChecked()); - // The first parameter is the password - std::string password(*passwordUTF8); - - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = obj->SetSaslCredentials(username, password); - - if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - return Nan::ThrowError(errorObject); - } - - info.GetReturnValue().Set(Nan::Null()); -} - - -// Node methods -NAN_METHOD(Connection::NodeConfigureCallbacks) { - Nan::HandleScope scope; - - if (info.Length() < 2 || - !info[0]->IsBoolean() || - !info[1]->IsObject()) { - // Just throw an exception - return Nan::ThrowError("Need to specify a callbacks object"); - } - v8::Local context = Nan::GetCurrentContext(); - Connection* obj = ObjectWrap::Unwrap(info.This()); - - const bool add = Nan::To(info[0]).ToChecked(); - v8::Local configs_object = - info[1]->ToObject(context).ToLocalChecked(); - v8::Local configs_property_names = - configs_object->GetOwnPropertyNames(context).ToLocalChecked(); - - for (unsigned int j = 0; j < configs_property_names->Length(); ++j) { - std::string configs_string_key; - - v8::Local configs_key = - Nan::Get(configs_property_names, j).ToLocalChecked(); - v8::Local configs_value = - Nan::Get(configs_object, configs_key).ToLocalChecked(); - - int config_type = 0; - if (configs_value->IsObject() && configs_key->IsString()) { - Nan::Utf8String configs_utf8_key(configs_key); - configs_string_key = std::string(*configs_utf8_key); - if (configs_string_key.compare("global") == 0) { - config_type = 1; - } else if (configs_string_key.compare("topic") == 0) { - config_type = 2; - } else if (configs_string_key.compare("event") == 0) { - config_type = 3; - } else { - continue; - } - } else { - continue; - } - - v8::Local object = - configs_value->ToObject(context).ToLocalChecked(); - v8::Local property_names = - object->GetOwnPropertyNames(context).ToLocalChecked(); - - for (unsigned int i = 0; i < property_names->Length(); ++i) { - std::string errstr; - std::string string_key; - - v8::Local key = Nan::Get(property_names, i).ToLocalChecked(); - v8::Local value = Nan::Get(object, key).ToLocalChecked(); - - if (key->IsString()) { - Nan::Utf8String utf8_key(key); - string_key = std::string(*utf8_key); - } else { - continue; - } - - if (value->IsFunction()) { - v8::Local cb = value.As(); - switch (config_type) { - case 1: - obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr); - if (!errstr.empty()) { - return Nan::ThrowError(errstr.c_str()); - } - break; - case 2: - obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr); - if (!errstr.empty()) { - return Nan::ThrowError(errstr.c_str()); - } - break; - case 3: - obj->ConfigureCallback(string_key, cb, add); - break; - } - } - } - } - - info.GetReturnValue().Set(Nan::True()); -} - -NAN_METHOD(Connection::NodeSetOAuthBearerToken) { - if (!info[0]->IsString()) { - Nan::ThrowError("1st parameter must be a token string"); - return; - } - - if (!info[1]->IsNumber()) { - Nan::ThrowError("2nd 
parameter must be a lifetime_ms number"); - return; - } - - if (!info[2]->IsString()) { - Nan::ThrowError("3rd parameter must be a principal_name string"); - return; - } - - if (!info[3]->IsNullOrUndefined() && !info[3]->IsArray()) { - Nan::ThrowError("4th parameter must be an extensions array or null"); - return; - } - - // Get string pointer for the token - Nan::Utf8String tokenUtf8(Nan::To(info[0]).ToLocalChecked()); - std::string token(*tokenUtf8); - - // Get the lifetime_ms - int64_t lifetime_ms = Nan::To(info[1]).FromJust(); - - // Get string pointer for the principal_name - Nan::Utf8String principal_nameUtf8( - Nan::To(info[2]).ToLocalChecked()); - std::string principal_name(*principal_nameUtf8); - - // Get the extensions (if any) - std::list extensions; - if (!info[3]->IsNullOrUndefined()) { - v8::Local extensionsArray = info[3].As(); - extensions = v8ArrayToStringList(extensionsArray); - } - - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = - obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions); - - if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - return Nan::ThrowError(errorObject); - } - - info.GetReturnValue().Set(Nan::Null()); -} - -NAN_METHOD(Connection::NodeSetOAuthBearerTokenFailure) { - if (!info[0]->IsString()) { - Nan::ThrowError("1st parameter must be an error string"); - return; - } - - // Get string pointer for the error string - Nan::Utf8String errstrUtf8(Nan::To(info[0]).ToLocalChecked()); - std::string errstr(*errstrUtf8); - - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = obj->SetOAuthBearerTokenFailure(errstr); - - if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - return Nan::ThrowError(errorObject); - } - - info.GetReturnValue().Set(Nan::Null()); -} - -NAN_METHOD(Connection::NodeName) { - Connection* obj = ObjectWrap::Unwrap(info.This()); - std::string name = obj->Name(); - info.GetReturnValue().Set(Nan::New(name).ToLocalChecked()); -} - -} // namespace NodeKafka diff --git a/src/connection.h b/src/connection.h index 532468fe..12c03194 100644 --- a/src/connection.h +++ b/src/connection.h @@ -11,7 +11,8 @@ #ifndef SRC_CONNECTION_H_ #define SRC_CONNECTION_H_ -#include +#include +#include #include #include #include @@ -23,6 +24,7 @@ #include "src/errors.h" #include "src/config.h" #include "src/callbacks.h" +#include "src/workers.h" namespace NodeKafka { @@ -46,26 +48,216 @@ namespace NodeKafka { * @sa NodeKafka::Client */ -class Connection : public Nan::ObjectWrap { +template class Connection : public Napi::ObjectWrap { public: - bool IsConnected() const; - bool IsClosing() const; + bool IsConnected() const { + return !m_is_closing && m_client != NULL; + } + + bool IsClosing() const { + return m_client != NULL && m_is_closing; + } // Baton - Baton CreateTopic(std::string); - Baton CreateTopic(std::string, RdKafka::Conf*); - Baton GetMetadata(bool, std::string, int); - Baton QueryWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*, int); - Baton OffsetsForTimes(std::vector &, int); - Baton SetSaslCredentials(std::string, std::string); - Baton SetOAuthBearerToken(const std::string&, int64_t, const std::string&, - const std::list&); - Baton SetOAuthBearerTokenFailure(const std::string&); + Baton CreateTopic(std::string topic_name, RdKafka::Conf* conf = NULL) { + std::string errstr; + + RdKafka::Topic* topic = NULL; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + topic = 
RdKafka::Topic::create(m_client, topic_name, conf, errstr); + } else { + return Baton(RdKafka::ErrorCode::ERR__STATE); + } + } else { + return Baton(RdKafka::ErrorCode::ERR__STATE); + } + + if (!errstr.empty()) { + return Baton(RdKafka::ErrorCode::ERR_TOPIC_EXCEPTION, errstr); + } + + // Maybe do it this way later? Then we don't need to do static_cast + // + return Baton(topic); + } + + Baton GetMetadata(bool all_topics, std::string topic_name, int timeout_ms) { + RdKafka::Topic* topic = NULL; + RdKafka::ErrorCode err; + + std::string errstr; + + if (!topic_name.empty()) { + Baton b = CreateTopic(topic_name); + if (b.err() == RdKafka::ErrorCode::ERR_NO_ERROR) { + topic = b.data(); + } + } + + RdKafka::Metadata* metadata = NULL; + + if (!errstr.empty()) { + return Baton(RdKafka::ERR_TOPIC_EXCEPTION); + } + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + err = m_client->metadata(all_topics, topic, &metadata, timeout_ms); + } else { + err = RdKafka::ERR__STATE; + } + } else { + err = RdKafka::ERR__STATE; + } + + if (topic != NULL) + delete topic; + + if (err == RdKafka::ERR_NO_ERROR) { + return Baton(metadata); + } else { + // metadata is not set here + // @see https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860 // NOLINT + return Baton(err); + } + } + + Baton QueryWatermarkOffsets( + std::string topic_name, int32_t partition, + int64_t* low_offset, int64_t* high_offset, + int timeout_ms) { + // Check if we are connected first + + RdKafka::ErrorCode err; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + err = m_client->query_watermark_offsets(topic_name, partition, + low_offset, high_offset, timeout_ms); + + } else { + err = RdKafka::ERR__STATE; + } + } else { + err = RdKafka::ERR__STATE; + } + + return Baton(err); + } + + /** + * Look up the offsets for the given partitions by timestamp. + * + * The returned offset for each partition is the earliest offset whose + * timestamp is greater than or equal to the given timestamp in the + * corresponding partition. + * + * @returns A baton specifying the error state. If there was no error, + * there still may be an error on a topic partition basis. 
+ */ + Baton OffsetsForTimes( + std::vector &toppars, + int timeout_ms) { + // Check if we are connected first + + RdKafka::ErrorCode err; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + err = m_client->offsetsForTimes(toppars, timeout_ms); + + } else { + err = RdKafka::ERR__STATE; + } + } else { + err = RdKafka::ERR__STATE; + } + + return Baton(err); + } + + Baton SetSaslCredentials( + std::string username, std::string password) { + RdKafka::Error *error; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + error = m_client->sasl_set_credentials(username, password); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + return rdkafkaErrorToBaton(error); + } + + Baton SetOAuthBearerToken( + const std::string& value, int64_t lifetime_ms, + const std::string& principal_name, + const std::list& extensions) { + RdKafka::ErrorCode error_code; + std::string errstr; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + error_code = m_client->oauthbearer_set_token( + value, lifetime_ms, principal_name, extensions, errstr); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + if (error_code != RdKafka::ERR_NO_ERROR) { + return Baton(error_code, errstr); + } + + return Baton(error_code); + } + + Baton SetOAuthBearerTokenFailure(const std::string& errstr) { + RdKafka::ErrorCode error_code; - RdKafka::Handle* GetClient(); + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + error_code = m_client->oauthbearer_set_token_failure(errstr); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } - static RdKafka::TopicPartition* GetPartition(std::string &); - static RdKafka::TopicPartition* GetPartition(std::string &, int); + return Baton(error_code); + } + + RdKafka::Handle* GetClient() { + return m_client; + } + + static RdKafka::TopicPartition* GetPartition(std::string &topic) { + return RdKafka::TopicPartition::create(topic, RdKafka::Topic::PARTITION_UA); + } + + static RdKafka::TopicPartition* GetPartition(std::string &topic, int partition) { // NOLINT + return RdKafka::TopicPartition::create(topic, partition); + } Callbacks::Event m_event_cb; @@ -73,21 +265,127 @@ class Connection : public Nan::ObjectWrap { virtual void DeactivateDispatchers() = 0; virtual void ConfigureCallback( - const std::string &string_key, const v8::Local &cb, bool add); + const std::string &string_key, const Napi::Function &cb, bool add) { + if (string_key.compare("event_cb") == 0) { + if (add) { + this->m_event_cb.dispatcher.AddCallback(cb); + } else { + this->m_event_cb.dispatcher.RemoveCallback(cb); + } + } + } + + std::string Name() const { + if (!IsConnected()) { + return std::string(""); + } + return std::string(m_client->name()); + } + + +protected: + Connection(const Napi::CallbackInfo &info): m_event_cb() { + Napi::Env env = info.Env(); + if (!info.IsConstructCall()) { + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + } + + if (info.Length() < 2) { + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); + + } + } + + void Config(Conf *gconfig, Conf *tconfig) { + this->m_gconfig = gconfig; + this->m_tconfig = tconfig; + + 
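+    // The body below mirrors the removed NAN constructor: a Napi::CallbackInfo
+    // constructor cannot receive Conf pointers directly, so the derived
+    // classes parse their global/topic configuration objects first and then
+    // call Config() to finish the member setup.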
std::string errstr; + + m_client = NULL; + m_is_closing = false; + uv_rwlock_init(&m_connection_lock); + + // Try to set the event cb. Shouldn't be an error here, but if there + // is, it doesn't get reported. + // + // Perhaps node new methods should report this as an error? But there + // isn't anything the user can do about it. + m_gconfig->set("event_cb", &m_event_cb, errstr); + } + explicit Connection(Connection *existing): + m_event_cb() { + m_client = existing->m_client; + + m_gconfig = existing->m_gconfig; + m_tconfig = existing->m_tconfig; + + m_is_closing = false; + m_has_underlying = true; + + // We must share the same connection lock as the existing connection to + // avoid getting disconnected while the existing connection is still in use. + m_connection_lock = existing->m_connection_lock; + } + virtual ~Connection() { + // The underlying connection will take care of cleanup. + if (m_has_underlying) { + return; + } - std::string Name() const; + uv_rwlock_destroy(&m_connection_lock); + if (m_tconfig) { + delete m_tconfig; + } - protected: - Connection(Conf*, Conf*); - explicit Connection(Connection *); - ~Connection(); + if (m_gconfig) { + delete m_gconfig; + } + } - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo& info); - static Baton rdkafkaErrorToBaton(RdKafka::Error* error); + static Napi::FunctionReference constructor; - Baton setupSaslOAuthBearerConfig(); - Baton setupSaslOAuthBearerBackgroundQueue(); + static Baton rdkafkaErrorToBaton(RdKafka::Error* error) { + if (NULL == error) { + return Baton(RdKafka::ERR_NO_ERROR); + } else { + Baton result(error->code(), error->str(), error->is_fatal(), + error->is_retriable(), error->txn_requires_abort()); + delete error; + return result; + } + } + + // If OAUTHBEARER authentication is set up, then push the callbacks onto the + // SASL queue so we don't need to keep polling. This method should be called + // before the client is created. + Baton setupSaslOAuthBearerConfig() { + if (!m_gconfig->is_sasl_oauthbearer()) { + return Baton(RdKafka::ERR_NO_ERROR); + } + + std::string errstr; + if (m_gconfig->enable_sasl_queue(true, errstr) != RdKafka::Conf::CONF_OK) { + return Baton(RdKafka::ERR__STATE, errstr); + } + + return Baton(RdKafka::ERR_NO_ERROR); + } + + // If OAUTHBEARER authentication is set up, then handle the callbacks on + // the background thread. This method should be called after the client is + // created and only if `setupSaslOAuthBearerConfig` is called earlier. 
+  Baton setupSaslOAuthBearerBackgroundQueue() {
+    if (!m_gconfig->is_sasl_oauthbearer()) {
+      return Baton(RdKafka::ERR_NO_ERROR);
+    }
+
+    RdKafka::Error* error = m_client->sasl_background_callbacks_enable();
+    return rdkafkaErrorToBaton(error);
+  }

   bool m_is_closing;

@@ -98,16 +396,327 @@ class Connection : public Nan::ObjectWrap {
   uv_rwlock_t m_connection_lock;
   bool m_has_underlying = false;

-  RdKafka::Handle* m_client;
+  RdKafka::Handle *m_client;
+
+  // NAPI Methods
+
+  Napi::Value NodeGetMetadata(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    Napi::HandleScope scope(env);
+
+    Connection* obj = this;
+
+    Napi::Object config;
+    if (info[0].IsObject()) {
+      config = info[0].As<Napi::Object>();
+    } else {
+      config = Napi::Object::New(env);
+    }
+
+    if (!info[1].IsFunction()) {
+      Napi::Error::New(env, "Second parameter must be a callback").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    Napi::Function cb = info[1].As<Napi::Function>();
+
+    std::string topic = GetParameter<std::string>(config, "topic", "");
+    bool allTopics = GetParameter<bool>(config, "allTopics", true);
+    int timeout_ms = GetParameter<int64_t>(config, "timeout", 30000);
+
+    Napi::FunctionReference* callback = new Napi::FunctionReference();
+    *callback = Napi::Persistent(cb);
+
+    Napi::AsyncWorker *worker = new Workers::ConnectionMetadata(
+      callback, obj, topic, timeout_ms, allTopics);
+    worker->Queue();
+
+    return env.Null();
+  }
+
+  Napi::Value NodeOffsetsForTimes(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    Napi::HandleScope scope(env);
+
+    if (info.Length() < 3 || !info[0].IsArray()) {
+      // Just throw an exception
+      Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    std::vector<RdKafka::TopicPartition*> toppars =
+      Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());
+
+    int timeout_ms = info[1].As<Napi::Number>().Int32Value();
+
+    Napi::Function cb = info[2].As<Napi::Function>();
+    Napi::FunctionReference callback = Napi::Persistent(cb);
+
+    Connection* handle = this;
+
+    Napi::AsyncWorker *worker = new Workers::Handle::OffsetsForTimes(
+      callback, handle, toppars, timeout_ms);
+    worker->Queue();
+
+    return env.Null();
+  }
+
+  Napi::Value NodeQueryWatermarkOffsets(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    Napi::HandleScope scope(env);
+
+    Connection* obj = this;
+
+    if (!info[0].IsString()) {
+      Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[1].IsNumber()) {
+      Napi::Error::New(env, "2nd parameter must be a partition number").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[2].IsNumber()) {
+      Napi::Error::New(env, "3rd parameter must be a number of milliseconds").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[3].IsFunction()) {
+      Napi::Error::New(env, "4th parameter must be a callback").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    // Get string pointer for the topic name
+    std::string topicUTF8 = info[0].ToString().Utf8Value();
+    // The first parameter is the topic
+    std::string topic_name(topicUTF8);
+
+    // Second parameter is the partition
+    int32_t partition = info[1].As<Napi::Number>().Int32Value();
+
+    // Third parameter is the timeout
+    int timeout_ms = info[2].As<Napi::Number>().Int32Value();
+
+    // Fourth parameter is the callback
+    Napi::Function cb = info[3].As<Napi::Function>();
+    Napi::FunctionReference *callback = new Napi::FunctionReference();
+    callback->Reset(cb);
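+    // The heap-allocated FunctionReference keeps the JS callback alive until
+    // the async worker has finished on the libuv thread pool.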
+    Napi::AsyncWorker *worker = new Workers::ConnectionQueryWatermarkOffsets(
+      callback, obj, topic_name, partition, timeout_ms);
+    worker->Queue();
+
+    return env.Null();
+  }
+
+  Napi::Value NodeSetSaslCredentials(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    if (!info[0].IsString()) {
+      Napi::Error::New(env, "1st parameter must be a username string").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[1].IsString()) {
+      Napi::Error::New(env, "2nd parameter must be a password string").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    // Get string pointer for the username
+    std::string usernameUTF8 = info[0].As<Napi::String>().Utf8Value();
+    // The first parameter is the username
+    std::string username(usernameUTF8);
+
+    // Get string pointer for the password
+    std::string passwordUTF8 = info[1].As<Napi::String>().Utf8Value();
+    // The second parameter is the password
+    std::string password(passwordUTF8);
+
+    Baton b = this->SetSaslCredentials(username, password);
+
+    if (b.err() != RdKafka::ERR_NO_ERROR) {
+      b.ToError(env).ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    return env.Null();
+  }
+
+  // Node methods
+  Napi::Value NodeConfigureCallbacks(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    Napi::HandleScope scope(env);
+
+    if (info.Length() < 2 ||
+      !info[0].IsBoolean() ||
+      !info[1].IsObject()) {
+      // Just throw an exception
+      Napi::Error::New(env, "Need to specify a callbacks object").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    Connection* obj = this;
+
+    const bool add = info[0].As<Napi::Boolean>().Value();
+
+    Napi::Object configs_object = info[1].ToObject();
+    Napi::Array configs_property_names =
+      configs_object.GetPropertyNames();
+
+    for (unsigned int j = 0; j < configs_property_names.Length(); ++j) {
+      std::string configs_string_key;
+
+      Napi::Value configs_key =
+        (configs_property_names).Get(j);
+      Napi::Value configs_value =
+        (configs_object).Get(configs_key);
+
+      int config_type = 0;
+      if (configs_value.IsObject() && configs_key.IsString()) {
+        std::string configs_utf8_key = configs_key.As<Napi::String>();
+        configs_string_key = std::string(configs_utf8_key);
+        if (configs_string_key.compare("global") == 0) {
+          config_type = 1;
+        } else if (configs_string_key.compare("topic") == 0) {
+          config_type = 2;
+        } else if (configs_string_key.compare("event") == 0) {
+          config_type = 3;
+        } else {
+          continue;
+        }
+      } else {
+        continue;
+      }
+
+      Napi::Object object =
+        configs_value.ToObject();
+      Napi::Array property_names =
+        object.GetPropertyNames();
+
+      for (unsigned int i = 0; i < property_names.Length(); ++i) {
+        std::string errstr;
+        std::string string_key;
+
+        Napi::Value key = (property_names).Get(i);
+        Napi::Value value = (object).Get(key);
+
+        if (key.IsString()) {
+          std::string utf8_key = key.As<Napi::String>();
+          string_key = std::string(utf8_key);
+        } else {
+          continue;
+        }
+
+        if (value.IsFunction()) {
+          Napi::Function cb = value.As<Napi::Function>();
+          switch (config_type) {
+            case 1:
+              obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr);
+              if (!errstr.empty()) {
+                Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException();
+                return env.Null();
+              }
+              break;
+            case 2:
+              obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr);
+              if (!errstr.empty()) {
+                Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException();
+                return env.Null();
+              }
+              break;
+            case 3:
+              obj->ConfigureCallback(string_key, cb, add);
+              break;
+          }
+        }
+      }
+    }
+
+    return Napi::Boolean::From(env, true);
+  }
+
+  Napi::Value NodeSetOAuthBearerToken(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    if (!info[0].IsString()) {
+      Napi::Error::New(env, "1st parameter must be a token string").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[1].IsNumber()) {
+      Napi::Error::New(env, "2nd parameter must be a lifetime_ms number").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[2].IsString()) {
+      Napi::Error::New(env, "3rd parameter must be a principal_name string").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    if (!info[3].IsNull() && !info[3].IsUndefined() && !info[3].IsArray()) {
+      Napi::Error::New(env, "4th parameter must be an extensions array or null").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    // Get string pointer for the token
+    std::string tokenUtf8 = info[0].As<Napi::String>().Utf8Value();
+    std::string token(tokenUtf8);
+
+    // Get the lifetime_ms
+    int64_t lifetime_ms = info[1].As<Napi::Number>().Int64Value();
+
+    // Get string pointer for the principal_name
+    std::string principal_nameUtf8 =
+      info[2].As<Napi::String>().Utf8Value();
+    std::string principal_name(principal_nameUtf8);
+
+    // Get the extensions (if any)
+    std::list<std::string> extensions;
+    if (!info[3].IsNull() && !info[3].IsUndefined()) {
+      Napi::Array extensionsArray = info[3].As<Napi::Array>();
+      extensions = v8ArrayToStringList(extensionsArray);
+    }
+
+    Connection* obj = this;
+    Baton b =
+      obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions);
+
+    if (b.err() != RdKafka::ERR_NO_ERROR) {
+      b.ToError(env).ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    return env.Null();
+  }
+
+  Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
+    if (!info[0].IsString()) {
+      Napi::Error::New(env, "1st parameter must be an error string").ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    // Get string pointer for the error string
+    std::string errstrUtf8 = info[0].As<Napi::String>().Utf8Value();
+    std::string errstr(errstrUtf8);
+
+    Baton b = this->SetOAuthBearerTokenFailure(errstr);
+
+    if (b.err() != RdKafka::ERR_NO_ERROR) {
+      b.ToError(env).ThrowAsJavaScriptException();
+      return env.Null();
+    }
+
+    return env.Null();
+  }
+
+  Napi::Value NodeName(const Napi::CallbackInfo &info) {
+    Napi::Env env = info.Env();
-  static NAN_METHOD(NodeConfigureCallbacks);
-  static NAN_METHOD(NodeGetMetadata);
-  static NAN_METHOD(NodeQueryWatermarkOffsets);
-  static NAN_METHOD(NodeOffsetsForTimes);
-  static NAN_METHOD(NodeSetSaslCredentials);
-  static NAN_METHOD(NodeSetOAuthBearerToken);
-  static NAN_METHOD(NodeSetOAuthBearerTokenFailure);
-  static NAN_METHOD(NodeName);
+    return Napi::String::From(env, this->Name());
+  }
};

}  // namespace NodeKafka
diff --git a/src/errors.cc b/src/errors.cc
index 9d1d9675..5799f85d 100644
--- a/src/errors.cc
+++ b/src/errors.cc
@@ -14,40 +14,47 @@
namespace NodeKafka {

-v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err,
+Napi::Error RdKafkaError(const Napi::Env& env, const RdKafka::ErrorCode &err,
  const std::string &errstr) {
  int code = static_cast<int>(err);

-  v8::Local<v8::Object> ret = Nan::New<v8::Object>();
+  Napi::Error ret = Napi::Error::New(env);

-  Nan::Set(ret, Nan::New("message").ToLocalChecked(),
-    Nan::New<v8::String>(errstr).ToLocalChecked());
-  Nan::Set(ret, Nan::New("code").ToLocalChecked(),
-    Nan::New<v8::Number>(code));
+  (ret).Set(Napi::String::New(env, "message"),
+    Napi::String::New(env, errstr));
+  (ret).Set(Napi::String::New(env, "code"),
+    Napi::Number::New(env, code));

  return ret;
}

-v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &err) {
+Napi::Error RdKafkaError(const Napi::Env& env, const RdKafka::ErrorCode &err) {
  std::string errstr = RdKafka::err2str(err);
-  return RdKafkaError(err, errstr);
+  return RdKafkaError(env, err, errstr);
}

-v8::Local<v8::Object> RdKafkaError(
+Napi::Error RdKafkaError(
+  const Napi::Env& env,
  const RdKafka::ErrorCode &err, std::string errstr,
  bool isFatal, bool isRetriable, bool isTxnRequiresAbort) {
-  v8::Local<v8::Object> ret = RdKafkaError(err, errstr);
+  Napi::Error ret = RdKafkaError(env, err, errstr);

-  Nan::Set(ret, Nan::New("isFatal").ToLocalChecked(),
-    Nan::New<v8::Boolean>(isFatal));
-  Nan::Set(ret, Nan::New("isRetriable").ToLocalChecked(),
-    Nan::New<v8::Boolean>(isRetriable));
-  Nan::Set(ret, Nan::New("isTxnRequiresAbort").ToLocalChecked(),
-    Nan::New<v8::Boolean>(isTxnRequiresAbort));
+  (ret).Set(Napi::String::New(env, "isFatal"),
+    Napi::Boolean::New(env, isFatal));
+  (ret).Set(Napi::String::New(env, "isRetriable"),
+    Napi::Boolean::New(env, isRetriable));
+  (ret).Set(Napi::String::New(env, "isTxnRequiresAbort"),
+    Napi::Boolean::New(env, isTxnRequiresAbort));

  return ret;
}

+Napi::Value ThrowError(const Napi::Env& env, const std::string &message) {
+  Napi::Error error = Napi::Error::New(env, message);
+  error.ThrowAsJavaScriptException();
+  return error.Value();
+}
+
Baton::Baton(const RdKafka::ErrorCode &code) {
  m_err = code;
}
@@ -92,16 +99,16 @@ Baton Baton::BatonFromErrorAndDestroy(RdKafka::Error *error) {
  return Baton(err, errstr);
}

-v8::Local<v8::Object> Baton::ToObject() {
+Napi::Error Baton::ToError(const Napi::Env& env) {
  if (m_errstr.empty()) {
-    return RdKafkaError(m_err);
+    return RdKafkaError(env, m_err);
  } else {
-    return RdKafkaError(m_err, m_errstr);
+    return RdKafkaError(env, m_err, m_errstr);
  }
}

-v8::Local<v8::Object> Baton::ToTxnObject() {
-  return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort);  // NOLINT
+Napi::Error Baton::ToTxnError(const Napi::Env& env) {
+  return RdKafkaError(env, m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort);  // NOLINT
}

RdKafka::ErrorCode Baton::err() {
diff --git a/src/errors.h b/src/errors.h
index 248d26ad..a3f21ab9 100644
--- a/src/errors.h
+++ b/src/errors.h
@@ -11,7 +11,8 @@
#ifndef SRC_ERRORS_H_
#define SRC_ERRORS_H_

-#include <nan.h>
+#include <napi.h>
+#include <uv.h>
#include <iostream>
#include <string>
@@ -39,8 +40,8 @@ class Baton {
  RdKafka::ErrorCode err();
  std::string errstr();

-  v8::Local<v8::Object> ToObject();
-  v8::Local<v8::Object> ToTxnObject();
+  Napi::Error ToError(const Napi::Env &env);
+  Napi::Error ToTxnError(const Napi::Env &env);

 private:
  void* m_data;
@@ -50,10 +51,13 @@ class Baton {
  bool m_isRetriable;
  bool m_isTxnRequiresAbort;
};
-
-v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &);
-v8::Local<v8::Object> RdKafkaError(const RdKafka::ErrorCode &,
-  const std::string &);
+
+Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &);
+Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &, const std::string &);
+Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &err,
+  std::string errstr, bool isFatal, bool isRetriable,
+  bool isTxnRequiresAbort);
+Napi::Value ThrowError(const Napi::Env &env, const std::string &);

}  // namespace NodeKafka
diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc
index 4bc778d4..eddaa0d1 100644
--- a/src/kafka-consumer.cc
+++ b/src/kafka-consumer.cc
@@ -12,11 +12,10 @@
#include <string>
#include <vector>

+#include "src/errors.h"
#include "src/kafka-consumer.h"
#include "src/workers.h"

-using Nan::FunctionCallbackInfo;
-
namespace NodeKafka {

/**
@@ -29,16 +28,55 @@ namespace NodeKafka {
 * @sa NodeKafka::Client
 */

-KafkaConsumer::KafkaConsumer(Conf* gconfig, Conf* tconfig):
-  Connection(gconfig,
tconfig) { - std::string errstr; +KafkaConsumer::KafkaConsumer(const Napi::CallbackInfo& info): Connection(info) { + Napi::Env env = info.Env(); + if (!info.IsConstructCall()) { + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return; + } + + if (info.Length() < 2) { + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); + return; + } + + if (!info[0].IsObject()) { + Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); + return; + } + + std::string errstr; + + Napi::Object i1 = info[0].ToObject(); + + Conf* gconfig = + Conf::create(RdKafka::Conf::CONF_GLOBAL, info[0].ToObject(), errstr); + + if (!gconfig) { + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; + } - if (m_tconfig) - m_gconfig->set("default_topic_conf", m_tconfig, errstr); + // If tconfig isn't set, then just let us pick properties from gconf. + Conf* tconfig = nullptr; + if (info[1].IsObject()) { + tconfig = Conf::create(RdKafka::Conf::CONF_TOPIC, info[1].ToObject(), errstr); - m_consume_loop = nullptr; + if (!tconfig) { + delete gconfig; + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; + } } + this->Config(gconfig, tconfig); + + if (m_tconfig) + m_gconfig->set("default_topic_conf", m_tconfig, errstr); + + m_consume_loop = nullptr; +} + KafkaConsumer::~KafkaConsumer() { // We only want to run this if it hasn't been run already Disconnect(); @@ -129,8 +167,8 @@ void KafkaConsumer::DeactivateDispatchers() { } void KafkaConsumer::ConfigureCallback(const std::string& string_key, - const v8::Local& cb, - bool add) { + const Napi::Function& cb, + bool add) { if (string_key.compare("queue_non_empty_cb") == 0) { if (add) { this->m_queue_not_empty_cb.dispatcher.AddCallback(cb); @@ -175,7 +213,7 @@ Baton KafkaConsumer::GetWatermarkOffsets( if (IsConnected()) { // Always send true - we err = m_client->get_watermark_offsets(topic_name, partition, - low_offset, high_offset); + low_offset, high_offset); } else { err = RdKafka::ERR__STATE; } @@ -267,13 +305,13 @@ Baton KafkaConsumer::IncrementalUnassign( // For now, use two for loops. Make more efficient if needed later. 
for (unsigned int i = 0; i < partitions.size(); i++) { for (unsigned int j = 0; j < m_partitions.size(); j++) { - if (partitions[i]->partition() == m_partitions[j]->partition() && - partitions[i]->topic() == m_partitions[j]->topic()) { - delete_partitions.push_back(m_partitions[j]); - m_partitions.erase(m_partitions.begin() + j); - m_partition_cnt--; - break; - } + if (partitions[i]->partition() == m_partitions[j]->partition() && + partitions[i]->topic() == m_partitions[j]->topic()) { + delete_partitions.push_back(m_partitions[j]); + m_partitions.erase(m_partitions.begin() + j); + m_partition_cnt--; + break; + } } } } @@ -469,12 +507,12 @@ Baton KafkaConsumer::Consume(int timeout_ms) { RdKafka::ErrorCode response_code = message->err(); // we want to handle these errors at the call site if (response_code != RdKafka::ERR_NO_ERROR && - response_code != RdKafka::ERR__PARTITION_EOF && - response_code != RdKafka::ERR__TIMED_OUT && - response_code != RdKafka::ERR__TIMED_OUT_QUEUE + response_code != RdKafka::ERR__PARTITION_EOF && + response_code != RdKafka::ERR__TIMED_OUT && + response_code != RdKafka::ERR__TIMED_OUT_QUEUE ) { - delete message; - return Baton(response_code); + delete message; + return Baton(response_code); } return Baton(message); @@ -522,278 +560,223 @@ std::string KafkaConsumer::RebalanceProtocol() { return m_consumer->rebalance_protocol(); } -Nan::Persistent KafkaConsumer::constructor; - -void KafkaConsumer::Init(v8::Local exports) { - Nan::HandleScope scope; - - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("KafkaConsumer").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); - - /* - * Lifecycle events inherited from NodeKafka::Connection - * - * @sa NodeKafka::Connection - */ - - Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks); - - /* - * @brief Methods to do with establishing state - */ - - Nan::SetPrototypeMethod(tpl, "connect", NodeConnect); - Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect); - Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); - Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT - Nan::SetPrototypeMethod(tpl, "offsetsForTimes", NodeOffsetsForTimes); - Nan::SetPrototypeMethod(tpl, "getWatermarkOffsets", NodeGetWatermarkOffsets); - Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - NodeSetOAuthBearerTokenFailure); - - /* - * @brief Methods exposed to do with message retrieval - */ - Nan::SetPrototypeMethod(tpl, "subscription", NodeSubscription); - Nan::SetPrototypeMethod(tpl, "subscribe", NodeSubscribe); - Nan::SetPrototypeMethod(tpl, "unsubscribe", NodeUnsubscribe); - Nan::SetPrototypeMethod(tpl, "consumeLoop", NodeConsumeLoop); - Nan::SetPrototypeMethod(tpl, "consume", NodeConsume); - Nan::SetPrototypeMethod(tpl, "seek", NodeSeek); - - /** - * @brief Pausing and resuming - */ - Nan::SetPrototypeMethod(tpl, "pause", NodePause); - Nan::SetPrototypeMethod(tpl, "resume", NodeResume); - - /* - * @brief Methods to do with partition assignment / rebalancing - */ - - Nan::SetPrototypeMethod(tpl, "committed", NodeCommitted); - Nan::SetPrototypeMethod(tpl, "position", NodePosition); - Nan::SetPrototypeMethod(tpl, "assign", NodeAssign); - Nan::SetPrototypeMethod(tpl, "unassign", NodeUnassign); - Nan::SetPrototypeMethod(tpl, "incrementalAssign", NodeIncrementalAssign); - 
Nan::SetPrototypeMethod(tpl, "incrementalUnassign", NodeIncrementalUnassign); - Nan::SetPrototypeMethod(tpl, "assignments", NodeAssignments); - Nan::SetPrototypeMethod(tpl, "assignmentLost", NodeAssignmentLost); - Nan::SetPrototypeMethod(tpl, "rebalanceProtocol", NodeRebalanceProtocol); - - Nan::SetPrototypeMethod(tpl, "commit", NodeCommit); - Nan::SetPrototypeMethod(tpl, "commitSync", NodeCommitSync); - Nan::SetPrototypeMethod(tpl, "commitCb", NodeCommitCb); - Nan::SetPrototypeMethod(tpl, "offsetsStore", NodeOffsetsStore); - Nan::SetPrototypeMethod(tpl, "offsetsStoreSingle", NodeOffsetsStoreSingle); - - constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) - .ToLocalChecked()); - Nan::Set(exports, Nan::New("KafkaConsumer").ToLocalChecked(), - (tpl->GetFunction(Nan::GetCurrentContext())).ToLocalChecked()); +Napi::FunctionReference KafkaConsumer::constructor; + +void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { + Napi::HandleScope scope(env); + + Napi::Function KafkaConsumer = DefineClass(env, "KafkaConsumer", { + /* + * Lifecycle events inherited from NodeKafka::Connection + * + * @sa NodeKafka::Connection + */ + + InstanceMethod("configureCallbacks", &KafkaConsumer::NodeConfigureCallbacks), + + /* + * @brief Methods to do with establishing state + */ + InstanceMethod("connect", &KafkaConsumer::NodeConnect), + InstanceMethod("disconnect", &KafkaConsumer::NodeDisconnect), + InstanceMethod("getMetadata", &KafkaConsumer::NodeGetMetadata), + InstanceMethod("queryWatermarkOffsets", &KafkaConsumer::NodeQueryWatermarkOffsets), // NOLINT + InstanceMethod("offsetsForTimes", &KafkaConsumer::NodeOffsetsForTimes), + InstanceMethod("getWatermarkOffsets", &KafkaConsumer::NodeGetWatermarkOffsets), + InstanceMethod("setSaslCredentials", &KafkaConsumer::NodeSetSaslCredentials), + InstanceMethod("setOAuthBearerToken", &KafkaConsumer::NodeSetOAuthBearerToken), + StaticMethod("setOAuthBearerTokenFailure", &KafkaConsumer::NodeSetOAuthBearerTokenFailure), + + /* + * @brief Methods exposed to do with message retrieval + */ + InstanceMethod("subscription", &KafkaConsumer::NodeSubscription), + InstanceMethod("subscribe", &KafkaConsumer::NodeSubscribe), + InstanceMethod("unsubscribe", &KafkaConsumer::NodeUnsubscribe), + InstanceMethod("consumeLoop", &KafkaConsumer::NodeConsumeLoop), + InstanceMethod("consume", &KafkaConsumer::NodeConsume), + InstanceMethod("seek", &KafkaConsumer::NodeSeek), + + + /** + * @brief Pausing and resuming + */ + InstanceMethod("pause", &KafkaConsumer::NodePause), + InstanceMethod("resume", &KafkaConsumer::NodeResume), + + + /* + * @brief Methods to do with partition assignment / rebalancing + */ + + InstanceMethod("committed", &KafkaConsumer::NodeCommitted), + InstanceMethod("position", &KafkaConsumer::NodePosition), + InstanceMethod("assign", &KafkaConsumer::NodeAssign), + InstanceMethod("unassign", &KafkaConsumer::NodeUnassign), + InstanceMethod("incrementalAssign", &KafkaConsumer::NodeIncrementalAssign), + InstanceMethod("incrementalUnassign", &KafkaConsumer::NodeIncrementalUnassign), + InstanceMethod("assignments", &KafkaConsumer::NodeAssignments), + InstanceMethod("assignmentLost", &KafkaConsumer::NodeAssignmentLost), + InstanceMethod("rebalanceProtocol", &KafkaConsumer::NodeRebalanceProtocol), + + InstanceMethod("commit", &KafkaConsumer::NodeCommit), + InstanceMethod("commitSync", &KafkaConsumer::NodeCommitSync), + InstanceMethod("commitCb", &KafkaConsumer::NodeCommitCb), + InstanceMethod("offsetsStore", &KafkaConsumer::NodeOffsetsStore), + 
InstanceMethod("offsetsStoreSingle", &KafkaConsumer::NodeOffsetsStoreSingle), + }); + + constructor.Reset(KafkaConsumer); + exports.Set(Napi::String::New(env, "KafkaConsumer"), KafkaConsumer); } -void KafkaConsumer::New(const Nan::FunctionCallbackInfo& info) { - if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); - } - - if (info.Length() < 2) { - return Nan::ThrowError("You must supply global and topic configuration"); - } - - if (!info[0]->IsObject()) { - return Nan::ThrowError("Global configuration data must be specified"); - } - - std::string errstr; - - Conf* gconfig = - Conf::create(RdKafka::Conf::CONF_GLOBAL, - (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); - - if (!gconfig) { - return Nan::ThrowError(errstr.c_str()); - } - - // If tconfig isn't set, then just let us pick properties from gconf. - Conf* tconfig = nullptr; - if (info[1]->IsObject()) { - tconfig = Conf::create(RdKafka::Conf::CONF_TOPIC, - (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); - - if (!tconfig) { - delete gconfig; - return Nan::ThrowError(errstr.c_str()); - } - } +// Napi::Object KafkaConsumer::NewInstance(Napi::Value arg) { +// Napi::Env env = arg.Env(); +// Napi::EscapableHandleScope scope(env); - // TODO: fix this - this memory is leaked. - KafkaConsumer* consumer = new KafkaConsumer(gconfig, tconfig); +// const unsigned argc = 1; - // Wrap it - consumer->Wrap(info.This()); +// Napi::Value argv[argc] = { arg }; +// Napi::Function cons = Napi::Function::New(env, constructor); +// Napi::Object instance = +// Napi::NewInstance(cons, argc, argv); - // Then there is some weird initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it - - info.GetReturnValue().Set(info.This()); -} - -v8::Local KafkaConsumer::NewInstance(v8::Local arg) { - Nan::EscapableHandleScope scope; - - const unsigned argc = 1; - - v8::Local argv[argc] = { arg }; - v8::Local cons = Nan::New(constructor); - v8::Local instance = - Nan::NewInstance(cons, argc, argv).ToLocalChecked(); - - return scope.Escape(instance); -} +// return scope.Escape(instance); +// } /* Node exposed methods */ -NAN_METHOD(KafkaConsumer::NodeCommitted) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeCommitted(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[0]->IsArray()) { + if (info.Length() < 3 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("Need to specify an array of topic partitions"); + Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); } std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[1].As()); + uint32_t maybeTimeout = + info[1].As().Uint32Value(); - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); - } + timeout_ms = static_cast(maybeTimeout); - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + Napi::AsyncWorker *worker = + new Workers::KafkaConsumerCommitted(callback, this, 
toppars, timeout_ms);
-  Nan::AsyncQueueWorker(
-    new Workers::KafkaConsumerCommitted(callback, consumer,
-      toppars, timeout_ms));
+  worker->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
}

-NAN_METHOD(KafkaConsumer::NodeSubscription) {
-  Nan::HandleScope scope;
-
-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+Napi::Value KafkaConsumer::NodeSubscription(const Napi::CallbackInfo &info) {
+  const Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  Baton b = consumer->Subscription();
+  Baton b = this->Subscription();

  if (b.err() != RdKafka::ErrorCode::ERR_NO_ERROR) {
    // Let the JS library throw if we need to so the error can be more rich
    int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
  }

  std::vector<std::string> * topics = b.data<std::vector<std::string>*>();

-  info.GetReturnValue().Set(Conversion::Util::ToV8Array(*topics));
+  // Convert the result before freeing the vector; with N-API the value is
+  // returned, so cleanup has to happen before the return statement.
+  Napi::Value subscription = Conversion::Util::ToV8Array(*topics);

  delete topics;
+
+  return subscription;
}

-NAN_METHOD(KafkaConsumer::NodePosition) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
-
-  if (info.Length() < 1 || !info[0]->IsArray()) {
+  if (info.Length() < 1 || !info[0].IsArray()) {
    // Just throw an exception
-    return Nan::ThrowError("Need to specify an array of topic partitions");
+    Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException();
+    return env.Null();
  }

  std::vector<RdKafka::TopicPartition*> toppars =
-    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+    Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());

-  Baton b = consumer->Position(toppars);
+  Baton b = this->Position(toppars);

  if (b.err() != RdKafka::ErrorCode::ERR_NO_ERROR) {
    // Let the JS library throw if we need to so the error can be more rich
    int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
  }

-  info.GetReturnValue().Set(
-    Conversion::TopicPartition::ToV8Array(toppars));
+  Napi::Value result = Conversion::TopicPartition::ToV8Array(toppars);

  // Delete the underlying topic partitions
  RdKafka::TopicPartition::destroy(toppars);
+
+  return result;
}

-NAN_METHOD(KafkaConsumer::NodeAssignments) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeAssignments(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
-  Baton b = consumer->RefreshAssignments();
+  Baton b = this->RefreshAssignments();

  if (b.err() != RdKafka::ERR_NO_ERROR) {
    // Let the JS library throw if we need to so the error can be more rich
    int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
  }

-  info.GetReturnValue().Set(
-    Conversion::TopicPartition::ToV8Array(consumer->m_partitions));
+  return
+    Conversion::TopicPartition::ToV8Array(this->m_partitions);
}

-NAN_METHOD(KafkaConsumer::NodeAssignmentLost) {
-  Nan::HandleScope scope;
-
-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+Napi::Value KafkaConsumer::NodeAssignmentLost(const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  Baton b = consumer->AssignmentLost();
+  Baton b = this->AssignmentLost();

  bool lost = b.data<bool>();
-  info.GetReturnValue().Set(Nan::New<v8::Boolean>(lost));
+  return Napi::Boolean::New(env, lost);
}
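The consumer method ports above follow one mechanical NAN-to-N-API pattern:
instead of storing the result on the callback info, the method now returns a
Napi::Value, so any native cleanup (delete, RdKafka::TopicPartition::destroy)
must be sequenced before the return. A minimal sketch of the pattern; the
Example method and its body are illustrative only, not part of this PR:

    // NAN: the result is stored on `info`, so cleanup may follow the Set().
    NAN_METHOD(Example) {
      info.GetReturnValue().Set(Nan::New<v8::Number>(42));
      // ...cleanup can still run here...
    }

    // N-API: the result is the return value, so release native resources
    // first and return last.
    Napi::Value Example(const Napi::CallbackInfo& info) {
      Napi::Env env = info.Env();
      Napi::Value result = Napi::Number::New(env, 42);
      // ...free native resources here, before returning...
      return result;
    }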
-NAN_METHOD(KafkaConsumer::NodeRebalanceProtocol) { - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::string protocol = consumer->RebalanceProtocol(); - info.GetReturnValue().Set(Nan::New(protocol).ToLocalChecked()); +Napi::Value KafkaConsumer::NodeRebalanceProtocol(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + std::string protocol = this->RebalanceProtocol(); + return Napi::String::New(env, protocol); } -NAN_METHOD(KafkaConsumer::NodeAssign) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeAssign(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsArray()) { + if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("Need to specify an array of partitions"); + Napi::Error::New(env, "Need to specify an array of partitions").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partitions = info[0].As(); + Napi::Array partitions = info[0].As(); std::vector topic_partitions; - for (unsigned int i = 0; i < partitions->Length(); ++i) { - v8::Local partition_obj_value; - if (!( - Nan::Get(partitions, i).ToLocal(&partition_obj_value) && - partition_obj_value->IsObject())) { - Nan::ThrowError("Must pass topic-partition objects"); + for (unsigned int i = 0; i < partitions.Length(); ++i) { + Napi::Value partition_obj_value = partitions.Get(i); + if (!partition_obj_value.IsObject()) { + Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); } - v8::Local partition_obj = partition_obj_value.As(); + Napi::Object partition_obj = partition_obj_value.As(); // Got the object int64_t partition = GetParameter(partition_obj, "partition", -1); @@ -803,75 +786,74 @@ NAN_METHOD(KafkaConsumer::NodeAssign) { RdKafka::TopicPartition* part; if (partition < 0) { - part = Connection::GetPartition(topic); + part = Connection::GetPartition(topic); } else { - part = Connection::GetPartition(topic, partition); + part = Connection::GetPartition(topic, partition); } // Set the default value to offset invalid. If provided, we will not set // the offset. int64_t offset = GetParameter( - partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); if (offset != RdKafka::Topic::OFFSET_INVALID) { - part->set_offset(offset); + part->set_offset(offset); } topic_partitions.push_back(part); } } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Hand over the partitions to the consumer. 
- Baton b = consumer->Assign(topic_partitions); + Baton b = this->Assign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - Nan::ThrowError(RdKafka::err2str(b.err()).c_str()); + Napi::Error::New(env, RdKafka::err2str(b.err()).c_str()).ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return Napi::Value::From(env, true); } -NAN_METHOD(KafkaConsumer::NodeUnassign) { - Nan::HandleScope scope; - - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - +Napi::Value KafkaConsumer::NodeUnassign(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (!consumer->IsClosing() && !consumer->IsConnected()) { - Nan::ThrowError("KafkaConsumer is disconnected"); - return; + if (!this->IsClosing() && !this->IsConnected()) { + Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } - Baton b = consumer->Unassign(); + Baton b = this->Unassign(); if (b.err() != RdKafka::ERR_NO_ERROR) { - Nan::ThrowError(RdKafka::err2str(b.err()).c_str()); + Napi::Error::New(env, RdKafka::err2str(b.err()).c_str()).ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return Napi::Value::From(env, true); } -NAN_METHOD(KafkaConsumer::NodeIncrementalAssign) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsArray()) { + if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("Need to specify an array of partitions"); + Napi::Error::New(env, "Need to specify an array of partitions").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partitions = info[0].As(); + Napi::Array partitions = info[0].As(); std::vector topic_partitions; - for (unsigned int i = 0; i < partitions->Length(); ++i) { - v8::Local partition_obj_value; - if (!( - Nan::Get(partitions, i).ToLocal(&partition_obj_value) && - partition_obj_value->IsObject())) { - Nan::ThrowError("Must pass topic-partition objects"); + for (unsigned int i = 0; i < partitions.Length(); ++i) { + Napi::Value partition_obj_value = partitions.Get(i); + if (!partition_obj_value.IsObject()) { + Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partition_obj = partition_obj_value.As(); + Napi::Object partition_obj = partition_obj_value.As(); // Got the object int64_t partition = GetParameter(partition_obj, "partition", -1); @@ -881,56 +863,54 @@ NAN_METHOD(KafkaConsumer::NodeIncrementalAssign) { RdKafka::TopicPartition* part; if (partition < 0) { - part = Connection::GetPartition(topic); + part = Connection::GetPartition(topic); } else { - part = Connection::GetPartition(topic, partition); + part = Connection::GetPartition(topic, partition); } // Set the default value to offset invalid. If provided, we will not set // the offset. int64_t offset = GetParameter( - partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); if (offset != RdKafka::Topic::OFFSET_INVALID) { - part->set_offset(offset); + part->set_offset(offset); } topic_partitions.push_back(part); } } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Hand over the partitions to the consumer. 
- Baton b = consumer->IncrementalAssign(topic_partitions); + Baton b = this->IncrementalAssign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - Nan::ThrowError(errorObject); + b.ToError(env).ThrowAsJavaScriptException(); } - info.GetReturnValue().Set(Nan::True()); + return Napi::Value::From(env, true); } -NAN_METHOD(KafkaConsumer::NodeIncrementalUnassign) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsArray()) { + if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("Need to specify an array of partitions"); + Napi::Error::New(env, "Need to specify an array of partitions").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partitions = info[0].As(); + Napi::Array partitions = info[0].As(); std::vector topic_partitions; - for (unsigned int i = 0; i < partitions->Length(); ++i) { - v8::Local partition_obj_value; - if (!( - Nan::Get(partitions, i).ToLocal(&partition_obj_value) && - partition_obj_value->IsObject())) { - Nan::ThrowError("Must pass topic-partition objects"); + for (unsigned int i = 0; i < partitions.Length(); ++i) { + Napi::Value partition_obj_value = partitions.Get(i); + if (!partition_obj_value.IsObject()) { + Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partition_obj = partition_obj_value.As(); + Napi::Object partition_obj = partition_obj_value.As(); // Got the object int64_t partition = GetParameter(partition_obj, "partition", -1); @@ -940,321 +920,334 @@ NAN_METHOD(KafkaConsumer::NodeIncrementalUnassign) { RdKafka::TopicPartition* part; if (partition < 0) { - part = Connection::GetPartition(topic); + part = Connection::GetPartition(topic); } else { - part = Connection::GetPartition(topic, partition); + part = Connection::GetPartition(topic, partition); } // Set the default value to offset invalid. If provided, we will not set // the offset. int64_t offset = GetParameter( - partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); if (offset != RdKafka::Topic::OFFSET_INVALID) { - part->set_offset(offset); + part->set_offset(offset); } topic_partitions.push_back(part); } } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Hand over the partitions to the consumer. 
- Baton b = consumer->IncrementalUnassign(topic_partitions); + Baton b = this->IncrementalUnassign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - Nan::ThrowError(errorObject); + Napi::Error errorObject = b.ToError(env); + errorObject.ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return Napi::Value::From(env, true); } -NAN_METHOD(KafkaConsumer::NodeUnsubscribe) { - Nan::HandleScope scope; - - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); +Napi::Value KafkaConsumer::NodeUnsubscribe(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - Baton b = consumer->Unsubscribe(); + Baton b = this->Unsubscribe(); - info.GetReturnValue().Set(Nan::New(static_cast(b.err()))); + return Napi::Value::From(env, b.err()); } -NAN_METHOD(KafkaConsumer::NodeCommit) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); int error_code; - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - if (!consumer->IsConnected()) { - Nan::ThrowError("KafkaConsumer is disconnected"); - return; + if (!this->IsConnected()) { + Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } - if (info[0]->IsNull() || info[0]->IsUndefined()) { - Baton b = consumer->Commit(); + if (info[0].IsNull() || info[0].IsUndefined()) { + Baton b = this->Commit(); error_code = static_cast(b.err()); - } else if (info[0]->IsArray()) { + } else if (info[0].IsArray()) { std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Commit(toppars); + Baton b = this->Commit(toppars); error_code = static_cast(b.err()); RdKafka::TopicPartition::destroy(toppars); - } else if (info[0]->IsObject()) { + } else if (info[0].IsObject()) { RdKafka::TopicPartition * toppar = - Conversion::TopicPartition::FromV8Object(info[0].As()); + Conversion::TopicPartition::FromV8Object(info[0].As()); if (toppar == NULL) { - Nan::ThrowError("Invalid topic partition provided"); - return; + Napi::Error::New(env, "Invalid topic partition provided").ThrowAsJavaScriptException(); + return env.Null(); } - Baton b = consumer->Commit(toppar); + Baton b = this->Commit(toppar); error_code = static_cast(b.err()); delete toppar; } else { - Nan::ThrowError("First parameter must be an object or an array"); - return; + Napi::Error::New(env, "First parameter must be an object or an array").ThrowAsJavaScriptException(); + return env.Null(); } - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeCommitSync) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); int error_code; - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - if (!consumer->IsConnected()) { - Nan::ThrowError("KafkaConsumer is disconnected"); - return; + if (!this->IsConnected()) { + Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } - if (info[0]->IsNull() || info[0]->IsUndefined()) { - Baton b = consumer->CommitSync(); + if (info[0].IsNull() || info[0].IsUndefined()) { + Baton b = this->CommitSync(); error_code = static_cast(b.err()); - } else if 
(info[0]->IsArray()) { + } else if (info[0].IsArray()) { std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->CommitSync(toppars); + Baton b = this->CommitSync(toppars); error_code = static_cast(b.err()); RdKafka::TopicPartition::destroy(toppars); - } else if (info[0]->IsObject()) { + } else if (info[0].IsObject()) { RdKafka::TopicPartition * toppar = - Conversion::TopicPartition::FromV8Object(info[0].As()); + Conversion::TopicPartition::FromV8Object(info[0].As()); if (toppar == NULL) { - Nan::ThrowError("Invalid topic partition provided"); - return; + Napi::Error::New(env, "Invalid topic partition provided").ThrowAsJavaScriptException(); + return env.Null(); } - Baton b = consumer->CommitSync(toppar); + Baton b = this->CommitSync(toppar); error_code = static_cast(b.err()); delete toppar; } else { - Nan::ThrowError("First parameter must be an object or an array"); - return; + Napi::Error::New(env, "First parameter must be an object or an array").ThrowAsJavaScriptException(); + return env.Null(); } - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeCommitCb) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); int error_code; std::optional> toppars = std::nullopt; - Nan::Callback *callback; + Napi::FunctionReference *callback; - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - if (!consumer->IsConnected()) { - Nan::ThrowError("KafkaConsumer is disconnected"); - return; + if (!this->IsConnected()) { + Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } if (info.Length() != 2) { - Nan::ThrowError("Two arguments are required"); - return; + Napi::Error::New(env, "Two arguments are required").ThrowAsJavaScriptException(); + return env.Null(); } if (!( - (info[0]->IsArray() || info[0]->IsNull()) && - info[1]->IsFunction())) { - Nan::ThrowError( - "First argument should be an array or null and second one a callback"); - return; + (info[0].IsArray() || info[0].IsNull()) && + info[1].IsFunction())) { + Napi::Error::New(env, + "First argument should be an array or null and second one a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (info[0]->IsArray()) { + if (info[0].IsArray()) { toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); } - callback = new Nan::Callback(info[1].As()); - Nan::AsyncQueueWorker( - new Workers::KafkaConsumerCommitCb(callback, consumer, - toppars)); + callback = new Napi::FunctionReference(); + callback->Reset(info[1].As()); + + + Workers::KafkaConsumerCommitCb *worker = + new Workers::KafkaConsumerCommitCb(callback, this, toppars); - info.GetReturnValue().Set(Nan::Null()); + worker->Queue(); + + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeSubscribe) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsArray()) { + if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("First parameter must be an array"); + Napi::Error::New(env, "First parameter must be an array").ThrowAsJavaScriptException(); + return env.Null(); } 
-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
-
-  v8::Local<v8::Array> topicsArray = info[0].As<v8::Array>();
+  Napi::Array topicsArray = info[0].As<Napi::Array>();
  std::vector<std::string> topics = Conversion::Util::ToStringVector(topicsArray);

-  Baton b = consumer->Subscribe(topics);
+  Baton b = this->Subscribe(topics);

  int error_code = static_cast<int>(b.err());
-  info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+  return Napi::Number::New(env, error_code);
}

-NAN_METHOD(KafkaConsumer::NodeSeek) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo &info) {
+  const Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

  // If number of parameters is less than 3 (need topic partition, timeout,
  // and callback), we can't call this thing
  if (info.Length() < 3) {
-    return Nan::ThrowError("Must provide a topic partition, timeout, and callback");  // NOLINT
+    Napi::Error::New(env, "Must provide a topic partition, timeout, and callback").ThrowAsJavaScriptException();
+    return env.Null();  // NOLINT
  }

-  if (!info[0]->IsObject()) {
-    return Nan::ThrowError("Topic partition must be an object");
+  if (!info[0].IsObject()) {
+    Napi::Error::New(env, "Topic partition must be an object").ThrowAsJavaScriptException();
+    return env.Null();
  }

-  if (!info[1]->IsNumber() && !info[1]->IsNull()) {
-    return Nan::ThrowError("Timeout must be a number.");
+  if (!info[1].IsNumber() && !info[1].IsNull()) {
+    Napi::Error::New(env, "Timeout must be a number.").ThrowAsJavaScriptException();
+    return env.Null();
  }

-  if (!info[2]->IsFunction()) {
-    return Nan::ThrowError("Callback must be a function");
+  if (!info[2].IsFunction()) {
+    Napi::Error::New(env, "Callback must be a function").ThrowAsJavaScriptException();
+    return env.Null();
  }

  int timeout_ms;
-  Nan::Maybe<uint32_t> maybeTimeout =
-    Nan::To<uint32_t>(info[1].As<v8::Number>());
+  // A null timeout is allowed by the check above, so keep the old default of
+  // 1000ms rather than coercing null through Napi::Number.
+  uint32_t maybeTimeout = 1000;
+  if (info[1].IsNumber()) {
+    maybeTimeout = info[1].As<Napi::Number>().Uint32Value();
+  }

-  if (maybeTimeout.IsNothing()) {
-    timeout_ms = 1000;
-  } else {
-    timeout_ms = static_cast<int>(maybeTimeout.FromJust());
-    // Do not allow timeouts of less than 10. Providing 0 causes segfaults
-    // because it makes it asynchronous.
-    if (timeout_ms < 10) {
-      timeout_ms = 10;
-    }
-  }
-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  timeout_ms = static_cast<int>(maybeTimeout);
+  // Do not allow timeouts of less than 10. Providing 0 causes segfaults
+  // because it makes it asynchronous.
+ if (timeout_ms < 10) { + timeout_ms = 10; + + } const RdKafka::TopicPartition * toppar = - Conversion::TopicPartition::FromV8Object(info[0].As()); + Conversion::TopicPartition::FromV8Object(info[0].As()); if (!toppar) { - return Nan::ThrowError("Invalid topic partition provided"); + Napi::Error::New(env, "Invalid topic partition provided").ThrowAsJavaScriptException(); + return env.Null(); } - Nan::Callback *callback = new Nan::Callback(info[2].As()); - Nan::AsyncQueueWorker( - new Workers::KafkaConsumerSeek(callback, consumer, toppar, timeout_ms)); + Napi::FunctionReference *callback = new Napi::FunctionReference(); - info.GetReturnValue().Set(Nan::Null()); + callback->Reset(info[2].As()); + + Napi::AsyncWorker *worker = + new Workers::KafkaConsumerSeek(callback, this, toppar, timeout_ms); + + worker->Queue(); + + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeOffsetsStore) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeOffsetsStore(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 1) { - return Nan::ThrowError("Must provide a list of topic partitions"); + Napi::Error::New(env, "Must provide a list of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Topic partition must be an array of objects"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Topic partition must be an array of objects").ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->OffsetsStore(toppars); + Baton b = this->OffsetsStore(toppars); RdKafka::TopicPartition::destroy(toppars); int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeOffsetsStoreSingle) { - Nan::HandleScope scope; +Napi::Value +KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, partition, // offset, and leader epoch), we can't call this. 
if (info.Length() < 4) { - return Nan::ThrowError( - "Must provide topic, partition, offset and leaderEpoch"); + Napi::Error::New(env, + "Must provide topic, partition, offset and leaderEpoch") + .ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Get string pointer for the topic name - Nan::Utf8String topicUTF8(Nan::To(info[0]).ToLocalChecked()); - const std::string& topic_name(*topicUTF8); + std::string topicUTF8 = info[0].As().Utf8Value(); + const std::string& topic_name(topicUTF8); - int64_t partition = Nan::To(info[1]).FromJust(); - int64_t offset = Nan::To(info[2]).FromJust(); - int64_t leader_epoch = Nan::To(info[3]).FromJust(); + int64_t partition = info[1].As().Int64Value(); + int64_t offset = info[2].As().Int64Value(); + int64_t leader_epoch = info[3].As().Int64Value(); RdKafka::TopicPartition* toppar = RdKafka::TopicPartition::create(topic_name, partition, offset); toppar->set_leader_epoch(leader_epoch); std::vector toppars = {toppar}; - Baton b = consumer->OffsetsStore(toppars); + Baton b = this->OffsetsStore(toppars); delete toppar; int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodePause) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodePause(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 1) { - return Nan::ThrowError("Must provide a list of topic partitions"); + Napi::Error::New(env, "Must provide a list of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Topic partition must be an array of objects"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Topic partition must be an array of objects").ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Pause(toppars); + Baton b = this->Pause(toppars); RdKafka::TopicPartition::destroy(toppars); #if 0 @@ -1271,28 +1264,29 @@ NAN_METHOD(KafkaConsumer::NodePause) { #endif int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeResume) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeResume(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 1) { - return Nan::ThrowError("Must provide a list of topic partitions"); // NOLINT + Napi::Error::New(env, "Must provide a list of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); // NOLINT } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Topic partition must be an array of objects"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Topic partition must be an array of objects").ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + 
Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Resume(toppars); + Baton b = this->Resume(toppars); // Now iterate through and delete these toppars for (std::vector::const_iterator it = toppars.begin(); // NOLINT @@ -1306,233 +1300,225 @@ NAN_METHOD(KafkaConsumer::NodeResume) { } int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeConsumeLoop) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeConsumeLoop(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); if (info.Length() < 3) { // Just throw an exception - return Nan::ThrowError("Invalid number of parameters"); + Napi::Error::New(env, "Invalid number of parameters").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsNumber()) { - return Nan::ThrowError("Need to specify a timeout"); + if (!info[0].IsNumber()) { + Napi::Error::New(env, "Need to specify a timeout").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsNumber()) { - return Nan::ThrowError("Need to specify a sleep delay"); + if (!info[1].IsNumber()) { + Napi::Error::New(env, "Need to specify a sleep delay").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (!info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[0].As()); - - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); - } + uint32_t maybeTimeout = + info[0].As().Uint32Value(); + timeout_ms = static_cast(maybeTimeout); int timeout_sleep_delay_ms; - Nan::Maybe maybeSleep = - Nan::To(info[1].As()); - - if (maybeSleep.IsNothing()) { - timeout_sleep_delay_ms = 500; - } else { - timeout_sleep_delay_ms = static_cast(maybeSleep.FromJust()); - } + uint32_t maybeSleep = + info[1].As().Uint32Value(); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + timeout_sleep_delay_ms = static_cast(maybeSleep); - if (consumer->m_consume_loop != nullptr) { - return Nan::ThrowError("Consume was already called"); + if (this->m_consume_loop != nullptr) { + Napi::Error::New(env, "Consume was already called").ThrowAsJavaScriptException(); + return env.Null(); } - if (!consumer->IsConnected()) { - return Nan::ThrowError("Connect must be called before consume"); + if (!this->IsConnected()) { + Napi::Error::New(env, "Connect must be called before consume").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local cb = info[2].As(); + Napi::Function cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - consumer->m_consume_loop = - new Workers::KafkaConsumerConsumeLoop(callback, consumer, timeout_ms, timeout_sleep_delay_ms); // NOLINT + this->m_consume_loop = + new Workers::KafkaConsumerConsumeLoop(callback, this, timeout_ms, timeout_sleep_delay_ms); // NOLINT - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeConsume) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); if (info.Length() < 2) { // Just throw an exception - return 
Nan::ThrowError("Invalid number of parameters"); + Napi::Error::New(env, "Invalid number of parameters").ThrowAsJavaScriptException(); + return env.Null(); } int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[0].As()); + uint32_t maybeTimeout = + info[0].As().Uint32Value(); - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); - } + timeout_ms = static_cast(maybeTimeout); - if (info[1]->IsNumber()) { - if (!info[2]->IsBoolean()) { - return Nan::ThrowError("Need to specify a boolean"); + if (info[1].IsNumber()) { + if (!info[2].IsBoolean()) { + Napi::Error::New(env, "Need to specify a boolean").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[3]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (!info[3].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local numMessagesNumber = info[1].As(); - Nan::Maybe numMessagesMaybe = Nan::To(numMessagesNumber); // NOLINT + Napi::Number numMessagesNumber = info[1].As(); + uint32_t numMessages = numMessagesNumber.As().Uint32Value(); // NOLINT - uint32_t numMessages; - if (numMessagesMaybe.IsNothing()) { - return Nan::ThrowError("Parameter must be a number over 0"); - } else { - numMessages = numMessagesMaybe.FromJust(); + if (numMessages == 0) { + Napi::Error::New(env, "Parameter must be a number over 0").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local isTimeoutOnlyForFirstMessageBoolean = info[2].As(); // NOLINT - Nan::Maybe isTimeoutOnlyForFirstMessageMaybe = - Nan::To(isTimeoutOnlyForFirstMessageBoolean); - - bool isTimeoutOnlyForFirstMessage; - if (isTimeoutOnlyForFirstMessageMaybe.IsNothing()) { - return Nan::ThrowError("Parameter must be a boolean"); - } else { - isTimeoutOnlyForFirstMessage = isTimeoutOnlyForFirstMessageMaybe.FromJust(); // NOLINT - } + Napi::Boolean isTimeoutOnlyForFirstMessageBoolean = info[2].As(); // NOLINT + bool isTimeoutOnlyForFirstMessage = + isTimeoutOnlyForFirstMessageBoolean.As().Value(); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - v8::Local cb = info[3].As(); - Nan::Callback *callback = new Nan::Callback(cb); - Nan::AsyncQueueWorker( - new Workers::KafkaConsumerConsumeNum(callback, consumer, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage)); // NOLINT + Napi::Function cb = info[3].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + Napi::AsyncWorker *worker = new Workers::KafkaConsumerConsumeNum( + callback, this, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage); + worker->Queue(); } else { - if (!info[1]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (!info[1].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); - Nan::AsyncQueueWorker( - new Workers::KafkaConsumerConsume(callback, consumer, timeout_ms)); + Napi::AsyncWorker* worker = new Workers::KafkaConsumerConsume(callback, this, timeout_ms); + worker->Queue(); } - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeConnect) { - Nan::HandleScope scope; +Napi::Value 
KafkaConsumer::NodeConnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsFunction()) { + if (info.Length() < 1 || !info[0].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Activate the dispatchers before the connection, as some callbacks may run // on the background thread. // We will deactivate them if the connection fails. - consumer->ActivateDispatchers(); + this->ActivateDispatchers(); - Nan::Callback *callback = new Nan::Callback(info[0].As()); - Nan::AsyncQueueWorker(new Workers::KafkaConsumerConnect(callback, consumer)); - - info.GetReturnValue().Set(Nan::Null()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(info[0].As()); + Napi::AsyncWorker* worker = new Workers::KafkaConsumerConnect(callback, this); + worker->Queue(); + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeDisconnect) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeDisconnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsFunction()) { + if (info.Length() < 1 || !info[0].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local cb = info[0].As(); - Nan::Callback *callback = new Nan::Callback(cb); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[0].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); Workers::KafkaConsumerConsumeLoop* consumeLoop = - (Workers::KafkaConsumerConsumeLoop*)consumer->m_consume_loop; + (Workers::KafkaConsumerConsumeLoop*)this->m_consume_loop; if (consumeLoop != nullptr) { // stop the consume loop consumeLoop->Close(); // cleanup the async worker - consumeLoop->WorkComplete(); + // consumeLoop->WorkComplete(); consumeLoop->Destroy(); - consumer->m_consume_loop = nullptr; + this->m_consume_loop = nullptr; } - Nan::AsyncQueueWorker( - new Workers::KafkaConsumerDisconnect(callback, consumer)); - info.GetReturnValue().Set(Nan::Null()); -} + Napi::AsyncWorker* worker = new Workers::KafkaConsumerDisconnect(callback, this); -NAN_METHOD(KafkaConsumer::NodeGetWatermarkOffsets) { - Nan::HandleScope scope; + worker->Queue(); + return env.Null(); +} - KafkaConsumer* obj = ObjectWrap::Unwrap(info.This()); +Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (!info[0]->IsString()) { - Nan::ThrowError("1st parameter must be a topic string");; - return; + if (!info[0].IsString()) { + Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsNumber()) { - Nan::ThrowError("2nd parameter must be a partition number"); - return; + if (!info[1].IsNumber()) { + Napi::Error::New(env, "2nd parameter must be a partition number").ThrowAsJavaScriptException(); + return env.Null(); } // Get string pointer for the topic name - Nan::Utf8String topicUTF8(Nan::To(info[0]).ToLocalChecked()); + std::string topicUTF8 = info[0].As().Utf8Value(); // The 
first parameter is the topic
-  std::string topic_name(*topicUTF8);
+  std::string topic_name(topicUTF8);

  // Second parameter is the partition
-  int32_t partition = Nan::To<int32_t>(info[1]).FromJust();
+  int32_t partition = info[1].As<Napi::Number>().Int32Value();

  // Set these ints which will store the return data
  int64_t low_offset;
  int64_t high_offset;

-  Baton b = obj->GetWatermarkOffsets(
+  Baton b = this->GetWatermarkOffsets(
    topic_name, partition, &low_offset, &high_offset);

  if (b.err() != RdKafka::ERR_NO_ERROR) {
    // Let the JS library throw if we need to so the error can be more rich
    int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
  } else {
-    v8::Local<v8::Object> offsetsObj = Nan::New<v8::Object>();
-    Nan::Set(offsetsObj, Nan::New("lowOffset").ToLocalChecked(),
-      Nan::New<v8::Number>(low_offset));
-    Nan::Set(offsetsObj, Nan::New("highOffset").ToLocalChecked(),
-      Nan::New<v8::Number>(high_offset));
+    Napi::Object offsetsObj = Napi::Object::New(env);
+    (offsetsObj).Set(Napi::String::New(env, "lowOffset"),
+      Napi::Number::New(env, low_offset));
+    (offsetsObj).Set(Napi::String::New(env, "highOffset"),
+      Napi::Number::New(env, high_offset));

-    return info.GetReturnValue().Set(offsetsObj);
+    return offsetsObj;
  }
}
diff --git a/src/kafka-consumer.h b/src/kafka-consumer.h
index e0d93562..366091ef 100644
--- a/src/kafka-consumer.h
+++ b/src/kafka-consumer.h
@@ -11,7 +11,8 @@
 #ifndef SRC_KAFKA_CONSUMER_H_
 #define SRC_KAFKA_CONSUMER_H_
-#include <nan.h>
+#include <napi.h>
+#include <uv.h>
 #include <iostream>
 #include <string>
 #include <vector>
@@ -35,11 +36,11 @@ namespace NodeKafka {
 * @sa NodeKafka::Client
 */
-class KafkaConsumer : public Connection {
+class KafkaConsumer : public Connection<KafkaConsumer> {
  friend class Producer;
 public:
-  static void Init(v8::Local<v8::Object>);
-  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);
+  static void Init(Napi::Env env, Napi::Object);
+  // static Napi::Object NewInstance(Napi::Value);

  Baton Connect();
  Baton Disconnect();
@@ -90,13 +91,14 @@ class KafkaConsumer : public Connection {
  void DeactivateDispatchers();

  void ConfigureCallback(const std::string& string_key,
-    const v8::Local<v8::Function>& cb, bool add) override;
+    const Napi::Function& cb, bool add) override;

 protected:
-  static Nan::Persistent<v8::Function> constructor;
-  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
+  static Napi::FunctionReference constructor;
+  static void New(const Napi::CallbackInfo& info);

-  KafkaConsumer(Conf *, Conf *);
+  KafkaConsumer(const Napi::CallbackInfo& info);
+  // KafkaConsumer(Conf *, Conf *);
  ~KafkaConsumer();

 private:
@@ -114,32 +116,32 @@ class KafkaConsumer : public Connection {
  RdKafka::KafkaConsumer *m_consumer = nullptr;

  // Node methods
-  static NAN_METHOD(NodeConnect);
-  static NAN_METHOD(NodeSubscribe);
-  static NAN_METHOD(NodeDisconnect);
-  static NAN_METHOD(NodeAssign);
-  static NAN_METHOD(NodeUnassign);
-  static NAN_METHOD(NodeIncrementalAssign);
-  static NAN_METHOD(NodeIncrementalUnassign);
-  static NAN_METHOD(NodeAssignments);
-  static NAN_METHOD(NodeAssignmentLost);
-  static NAN_METHOD(NodeRebalanceProtocol);
-  static NAN_METHOD(NodeUnsubscribe);
-  static NAN_METHOD(NodeCommit);
-  static NAN_METHOD(NodeCommitSync);
-  static NAN_METHOD(NodeCommitCb);
-  static NAN_METHOD(NodeOffsetsStore);
-  static NAN_METHOD(NodeOffsetsStoreSingle);
-  static NAN_METHOD(NodeCommitted);
-  static NAN_METHOD(NodePosition);
-  static NAN_METHOD(NodeSubscription);
-  static NAN_METHOD(NodeSeek);
-  static NAN_METHOD(NodeGetWatermarkOffsets);
-  static NAN_METHOD(NodeConsumeLoop);
-  static NAN_METHOD(NodeConsume);
-
-  static NAN_METHOD(NodePause);
-
static NAN_METHOD(NodeResume); + Napi::Value NodeConnect(const Napi::CallbackInfo& info); + Napi::Value NodeSubscribe(const Napi::CallbackInfo& info); + Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + Napi::Value NodeAssign(const Napi::CallbackInfo& info); + Napi::Value NodeUnassign(const Napi::CallbackInfo& info); + Napi::Value NodeIncrementalAssign(const Napi::CallbackInfo& info); + Napi::Value NodeIncrementalUnassign(const Napi::CallbackInfo& info); + Napi::Value NodeAssignments(const Napi::CallbackInfo& info); + Napi::Value NodeAssignmentLost(const Napi::CallbackInfo& info); + Napi::Value NodeRebalanceProtocol(const Napi::CallbackInfo& info); + Napi::Value NodeUnsubscribe(const Napi::CallbackInfo& info); + Napi::Value NodeCommit(const Napi::CallbackInfo& info); + Napi::Value NodeCommitSync(const Napi::CallbackInfo& info); + Napi::Value NodeCommitCb(const Napi::CallbackInfo& info); + Napi::Value NodeOffsetsStore(const Napi::CallbackInfo& info); + Napi::Value NodeOffsetsStoreSingle(const Napi::CallbackInfo& info); + Napi::Value NodeCommitted(const Napi::CallbackInfo& info); + Napi::Value NodePosition(const Napi::CallbackInfo& info); + Napi::Value NodeSubscription(const Napi::CallbackInfo& info); + Napi::Value NodeSeek(const Napi::CallbackInfo& info); + Napi::Value NodeGetWatermarkOffsets(const Napi::CallbackInfo& info); + Napi::Value NodeConsumeLoop(const Napi::CallbackInfo& info); + Napi::Value NodeConsume(const Napi::CallbackInfo& info); + + Napi::Value NodePause(const Napi::CallbackInfo& info); + Napi::Value NodeResume(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/producer.cc b/src/producer.cc index 68d8ad75..fc48f24b 100644 --- a/src/producer.cc +++ b/src/producer.cc @@ -30,144 +30,122 @@ namespace NodeKafka { * @sa NodeKafka::Connection */ -Producer::Producer(Conf* gconfig, Conf* tconfig): - Connection(gconfig, tconfig), - m_dr_cb(), - m_partitioner_cb(), - m_is_background_polling(false) { - std::string errstr; +Producer::Producer(const Napi::CallbackInfo &info) + : Connection(info), m_dr_cb(), m_partitioner_cb(), + m_is_background_polling(false) { - if (m_tconfig) - m_gconfig->set("default_topic_conf", m_tconfig, errstr); - - m_gconfig->set("dr_cb", &m_dr_cb, errstr); - } - -Producer::~Producer() { - Disconnect(); -} - -Nan::Persistent Producer::constructor; - -void Producer::Init(v8::Local exports) { - Nan::HandleScope scope; - - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("Producer").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); - - /* - * Lifecycle events inherited from NodeKafka::Connection - * - * @sa NodeKafka::Connection - */ - - Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks); - - /* - * @brief Methods to do with establishing state - */ - - Nan::SetPrototypeMethod(tpl, "connect", NodeConnect); - Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect); - Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); - Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT - Nan::SetPrototypeMethod(tpl, "poll", NodePoll); - Nan::SetPrototypeMethod(tpl, "setPollInBackground", NodeSetPollInBackground); - Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - NodeSetOAuthBearerTokenFailure); - - /* - * @brief Methods exposed to do with message production - */ - - 
Nan::SetPrototypeMethod(tpl, "setPartitioner", NodeSetPartitioner); - Nan::SetPrototypeMethod(tpl, "produce", NodeProduce); - - Nan::SetPrototypeMethod(tpl, "flush", NodeFlush); - - /* - * @brief Methods exposed to do with transactions - */ - - Nan::SetPrototypeMethod(tpl, "initTransactions", NodeInitTransactions); - Nan::SetPrototypeMethod(tpl, "beginTransaction", NodeBeginTransaction); - Nan::SetPrototypeMethod(tpl, "commitTransaction", NodeCommitTransaction); - Nan::SetPrototypeMethod(tpl, "abortTransaction", NodeAbortTransaction); - Nan::SetPrototypeMethod(tpl, "sendOffsetsToTransaction", NodeSendOffsetsToTransaction); // NOLINT - - // connect. disconnect. resume. pause. get meta data - constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) - .ToLocalChecked()); - - Nan::Set(exports, Nan::New("Producer").ToLocalChecked(), - tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked()); -} - -void Producer::New(const Nan::FunctionCallbackInfo& info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return; } if (info.Length() < 2) { - return Nan::ThrowError("You must supply global and topic configuration"); + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); + return; } - if (!info[0]->IsObject()) { - return Nan::ThrowError("Global configuration data must be specified"); + if (!info[0].IsObject()) { + Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); + return; } std::string errstr; Conf* gconfig = Conf::create(RdKafka::Conf::CONF_GLOBAL, - (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); + (info[0].ToObject()), errstr); if (!gconfig) { - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; } // If tconfig isn't set, then just let us pick properties from gconf. 
Conf* tconfig = nullptr; - if (info[1]->IsObject()) { + if (info[1].IsObject()) { tconfig = Conf::create( - RdKafka::Conf::CONF_TOPIC, - (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); + RdKafka::Conf::CONF_TOPIC, + (info[1].ToObject()), errstr); if (!tconfig) { // No longer need this since we aren't instantiating anything delete gconfig; - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; } } - Producer* producer = new Producer(gconfig, tconfig); + this->Config(gconfig, tconfig); - // Wrap it - producer->Wrap(info.This()); + if (m_tconfig) + m_gconfig->set("default_topic_conf", m_tconfig, errstr); - // Then there is some weird initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it + m_gconfig->set("dr_cb", &m_dr_cb, errstr); +} - info.GetReturnValue().Set(info.This()); +Producer::~Producer() { + Disconnect(); } -v8::Local Producer::NewInstance(v8::Local arg) { - Nan::EscapableHandleScope scope; +Napi::FunctionReference Producer::constructor; - const unsigned argc = 1; +void Producer::Init(const Napi::Env& env, Napi::Object exports) { + Napi::HandleScope scope(env); - v8::Local argv[argc] = { arg }; - v8::Local cons = Nan::New(constructor); - v8::Local instance = - Nan::NewInstance(cons, argc, argv).ToLocalChecked(); + Napi::Function Producer = DefineClass(env, "Producer", { + /* + * Lifecycle events inherited from NodeKafka::Connection + * + * @sa NodeKafka::Connection + */ - return scope.Escape(instance); + InstanceMethod("configureCallbacks", &Producer::NodeConfigureCallbacks), + + /* + * @brief Methods to do with establishing state + */ + + InstanceMethod("connect", &Producer::NodeConnect), + InstanceMethod("disconnect", &Producer::NodeDisconnect), + InstanceMethod("getMetadata", &Producer::NodeGetMetadata), + InstanceMethod("queryWatermarkOffsets", &Producer::NodeQueryWatermarkOffsets), // NOLINT + InstanceMethod("poll", &Producer::NodePoll), + InstanceMethod("setPollInBackground", &Producer::NodeSetPollInBackground), + InstanceMethod("setSaslCredentials", &Producer::NodeSetSaslCredentials), + InstanceMethod("setOAuthBearerToken", &Producer::NodeSetOAuthBearerToken), + StaticMethod("setOAuthBearerTokenFailure",&Producer::NodeSetOAuthBearerTokenFailure), + + /* + * @brief Methods exposed to do with message production + */ + + InstanceMethod("setPartitioner", &Producer::NodeSetPartitioner), + InstanceMethod("produce", &Producer::NodeProduce), + + InstanceMethod("flush", &Producer::NodeFlush), + + /* + * @brief Methods exposed to do with transactions + */ + + InstanceMethod("initTransactions", &Producer::NodeInitTransactions), + InstanceMethod("beginTransaction", &Producer::NodeBeginTransaction), + InstanceMethod("commitTransaction", &Producer::NodeCommitTransaction), + InstanceMethod("abortTransaction", &Producer::NodeAbortTransaction), + InstanceMethod("sendOffsetsToTransaction", &Producer::NodeSendOffsetsToTransaction), // NOLINT + }); + + + + + // connect. disconnect. resume. pause. 
get meta data + constructor.Reset(Producer); + + exports.Set(Napi::String::New(env, "Producer"), Producer); } + Baton Producer::Connect() { if (IsConnected()) { return Baton(RdKafka::ERR_NO_ERROR); @@ -238,8 +216,8 @@ Baton Producer::Produce(void* message, size_t size, RdKafka::Topic* topic, if (IsConnected()) { RdKafka::Producer* producer = dynamic_cast(m_client); response_code = producer->produce(topic, partition, - RdKafka::Producer::RK_MSG_COPY, - message, size, key, key_len, opaque); + RdKafka::Producer::RK_MSG_COPY, + message, size, key, key_len, opaque); } else { response_code = RdKafka::ERR__STATE; } @@ -308,10 +286,10 @@ Baton Producer::Produce(void* message, size_t size, std::string topic, RdKafka::Producer* producer = dynamic_cast(m_client); // This one is a bit different response_code = producer->produce(topic, partition, - RdKafka::Producer::RK_MSG_COPY, - message, size, - key, key_len, - timestamp, headers, opaque); + RdKafka::Producer::RK_MSG_COPY, + message, size, + key, key_len, + timestamp, headers, opaque); } else { response_code = RdKafka::ERR__STATE; } @@ -369,29 +347,29 @@ Baton Producer::SetPollInBackground(bool set) { return Baton(RdKafka::ERR_NO_ERROR); } -void Producer::ConfigureCallback(const std::string& string_key, - const v8::Local& cb, bool add) { - if (string_key.compare("delivery_cb") == 0) { - if (add) { - bool dr_msg_cb = false; - v8::Local dr_msg_cb_key = Nan::New("dr_msg_cb").ToLocalChecked(); // NOLINT - if (Nan::Has(cb, dr_msg_cb_key).FromMaybe(false)) { - v8::Local v = Nan::Get(cb, dr_msg_cb_key).ToLocalChecked(); - if (v->IsBoolean()) { - dr_msg_cb = Nan::To(v).ToChecked(); - } - } - if (dr_msg_cb) { - this->m_dr_cb.SendMessageBuffer(true); - } - this->m_dr_cb.dispatcher.AddCallback(cb); - } else { - this->m_dr_cb.dispatcher.RemoveCallback(cb); - } - } else { - Connection::ConfigureCallback(string_key, cb, add); - } -} +// void Producer::ConfigureCallback(const std::string& string_key, +// const Napi::Function& cb, bool add) { +// if (string_key.compare("delivery_cb") == 0) { +// if (add) { +// bool dr_msg_cb = false; +// Napi::String dr_msg_cb_key = Napi::String::New(env, "dr_msg_cb"); // NOLINT +// if ((cb).Has(dr_msg_cb_key).FromMaybe(false)) { +// Napi::Value v = (cb).Get(dr_msg_cb_key); +// if (v->IsBoolean()) { +// dr_msg_cb = v.As().Value().ToChecked(); +// } +// } +// if (dr_msg_cb) { +// this->m_dr_cb.SendMessageBuffer(true); +// } +// this->m_dr_cb.dispatcher.AddCallback(cb); +// } else { +// this->m_dr_cb.dispatcher.RemoveCallback(cb); +// } +// } else { +// Connection::ConfigureCallback(string_key, cb, add); +// } +// } Baton Producer::InitTransactions(int32_t timeout_ms) { if (!IsConnected()) { @@ -475,22 +453,24 @@ Baton Producer::SendOffsetsToTransaction( * * @sa RdKafka::Producer::produce */ -NAN_METHOD(Producer::NodeProduce) { - Nan::HandleScope scope; +Napi::Value Producer::NodeProduce(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); // Need to extract the message data here. 
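// (Aside: the Buffer-argument pattern the code below migrates to, as a
// minimal sketch; names are illustrative, not part of this diff.
// node-addon-api hands out a zero-copy view of the JS buffer:
//
//   if (info[2].IsBuffer()) {
//     Napi::Buffer<char> buf = info[2].As<Napi::Buffer<char>>();
//     void*  data = buf.Data();    // borrowed pointer, no copy made
//     size_t len  = buf.Length();  // length in bytes
//   }
//
// `data` is only valid while the JS buffer is reachable, which is why the
// code below either copies it or pins it before anything asynchronous runs.)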
if (info.Length() < 3) { // Just throw an exception - return Nan::ThrowError("Need to specify a topic, partition, and message"); + Napi::Error::New(env, "Need to specify a topic, partition, and message").ThrowAsJavaScriptException(); + return env.Null(); } // Second parameter is the partition int32_t partition; - if (info[1]->IsNull() || info[1]->IsUndefined()) { + if (info[1].IsNull() || info[1].IsUndefined()) { partition = RdKafka::Topic::PARTITION_UA; } else { - partition = Nan::To(info[1]).FromJust(); + partition = info[1].As().Int32Value(); } if (partition < 0) { @@ -500,15 +480,15 @@ NAN_METHOD(Producer::NodeProduce) { size_t message_buffer_length; void* message_buffer_data; - if (info[2]->IsNull()) { + if (info[2].IsNull()) { // This is okay for whatever reason message_buffer_length = 0; message_buffer_data = NULL; - } else if (!node::Buffer::HasInstance(info[2])) { - return Nan::ThrowError("Message must be a buffer or null"); + } else if (!info[2].IsBuffer()) { + Napi::Error::New(env, "Message must be a buffer or null").ThrowAsJavaScriptException(); + return env.Null(); } else { - v8::Local message_buffer_object = - (info[2]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(); + Napi::Object message_buffer_object = info[2].ToObject(); // v8 handles the garbage collection here so we need to make a copy of // the buffer or assign the buffer to a persistent handle. @@ -518,15 +498,15 @@ NAN_METHOD(Producer::NodeProduce) { // which should be more memory-efficient and allow v8 to dispose of the // buffer sooner - message_buffer_length = node::Buffer::Length(message_buffer_object); - message_buffer_data = node::Buffer::Data(message_buffer_object); + message_buffer_length = message_buffer_object.As>().Length(); + message_buffer_data = message_buffer_object.As>().Data(); if (message_buffer_data == NULL) { // empty string message buffer should not end up as null message - v8::Local message_buffer_object_emptystring = - Nan::NewBuffer(new char[0], 0).ToLocalChecked(); + Napi::Object message_buffer_object_emptystring = + Napi::Buffer::New(env, new char[0], 0); message_buffer_length = - node::Buffer::Length(message_buffer_object_emptystring); - message_buffer_data = node::Buffer::Data(message_buffer_object_emptystring); // NOLINT + message_buffer_object_emptystring.As>().Length(); + message_buffer_data = message_buffer_object_emptystring.As>().Data(); // NOLINT } } @@ -534,13 +514,12 @@ NAN_METHOD(Producer::NodeProduce) { const void* key_buffer_data; std::string * key = NULL; - if (info[3]->IsNull() || info[3]->IsUndefined()) { + if (info[3].IsNull() || info[3].IsUndefined()) { // This is okay for whatever reason key_buffer_length = 0; key_buffer_data = NULL; - } else if (node::Buffer::HasInstance(info[3])) { - v8::Local key_buffer_object = - (info[3]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(); + } else if (info[3].IsBuffer()) { + Napi::Object key_buffer_object = info[3].ToObject(); // v8 handles the garbage collection here so we need to make a copy of // the buffer or assign the buffer to a persistent handle. 
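// (Aside: why the borrowed pointer discussed above is nevertheless safe to
// hand to librdkafka. The produce calls in this file pass
// RdKafka::Producer::RK_MSG_COPY, so the payload is duplicated inside
// produce() before control returns to JavaScript. A hedged sketch mirroring
// the call shape used in this file:
//
//   Napi::Buffer<char> buf = info[2].As<Napi::Buffer<char>>();
//   producer->produce(topic, partition,
//                     RdKafka::Producer::RK_MSG_COPY,  // librdkafka copies
//                     buf.Data(), buf.Length(),
//                     key, key_len, opaque);
//   // after this returns, the JS buffer may be collected without risk
// )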
@@ -550,21 +529,21 @@ NAN_METHOD(Producer::NodeProduce) { // which should be more memory-efficient and allow v8 to dispose of the // buffer sooner - key_buffer_length = node::Buffer::Length(key_buffer_object); - key_buffer_data = node::Buffer::Data(key_buffer_object); + key_buffer_length = key_buffer_object.As>().Length(); + key_buffer_data = key_buffer_object.As>().Data(); if (key_buffer_data == NULL) { // empty string key buffer should not end up as null key - v8::Local key_buffer_object_emptystring = - Nan::NewBuffer(new char[0], 0).ToLocalChecked(); - key_buffer_length = node::Buffer::Length(key_buffer_object_emptystring); - key_buffer_data = node::Buffer::Data(key_buffer_object_emptystring); + Napi::Object key_buffer_object_emptystring = + Napi::Buffer::New(env, new char[0], 0); + key_buffer_length = key_buffer_object_emptystring.As>().Length(); + key_buffer_data = key_buffer_object_emptystring.As>().Data(); } } else { // If it was a string just use the utf8 value. - v8::Local val = Nan::To(info[3]).ToLocalChecked(); + Napi::String val = info[3].ToString(); // Get string pointer for this thing - Nan::Utf8String keyUTF8(val); - key = new std::string(*keyUTF8); + std::string keyUTF8 = val.Utf8Value(); + key = new std::string(keyUTF8); key_buffer_data = key->data(); key_buffer_length = key->length(); @@ -572,86 +551,86 @@ NAN_METHOD(Producer::NodeProduce) { int64_t timestamp; - if (info.Length() > 4 && !info[4]->IsUndefined() && !info[4]->IsNull()) { - if (!info[4]->IsNumber()) { - return Nan::ThrowError("Timestamp must be a number"); + if (info.Length() > 4 && !info[4].IsUndefined() && !info[4].IsNull()) { + if (!info[4].IsNumber()) { + Napi::Error::New(env, "Timestamp must be a number").ThrowAsJavaScriptException(); + return env.Null(); } - timestamp = Nan::To(info[4]).FromJust(); + timestamp = info[4].As().Int64Value(); } else { timestamp = 0; } void* opaque = NULL; // Opaque handling - if (info.Length() > 5 && !info[5]->IsUndefined()) { + if (info.Length() > 5 && !info[5].IsUndefined()) { // We need to create a persistent handle - opaque = new Nan::Persistent(info[5]); + opaque = Napi::Persistent(info[5]); // To get the local from this later, - // v8::Local object = Nan::New(persistent); + // Napi::Object object = Napi::New(env, persistent); } std::vector headers; - if (info.Length() > 6 && !info[6]->IsUndefined()) { - v8::Local v8Headers = v8::Local::Cast(info[6]); - - if (v8Headers->Length() >= 1) { - for (unsigned int i = 0; i < v8Headers->Length(); i++) { - v8::Local header = Nan::Get(v8Headers, i).ToLocalChecked() - ->ToObject(Nan::GetCurrentContext()).ToLocalChecked(); - if (header.IsEmpty()) { - continue; - } - - v8::Local props = header->GetOwnPropertyNames( - Nan::GetCurrentContext()).ToLocalChecked(); - - // TODO: Other properties in the list of properties should not be - // ignored, but they are. This is a bug, need to handle it either in JS - // or here. - Nan::MaybeLocal v8Key = - Nan::To(Nan::Get(props, 0).ToLocalChecked()); - - // The key must be a string. - if (v8Key.IsEmpty()) { - Nan::ThrowError("Header key must be a string"); - } - Nan::Utf8String uKey(v8Key.ToLocalChecked()); - std::string key(*uKey); - - // Valid types for the header are string or buffer. - // Other types will throw an error. 
- v8::Local v8Value = - Nan::Get(header, v8Key.ToLocalChecked()).ToLocalChecked(); - - if (node::Buffer::HasInstance(v8Value)) { - const char* value = node::Buffer::Data(v8Value); - const size_t value_len = node::Buffer::Length(v8Value); - headers.push_back(RdKafka::Headers::Header(key, value, value_len)); - } else if (v8Value->IsString()) { - Nan::Utf8String uValue(v8Value); - std::string value(*uValue); - headers.push_back( - RdKafka::Headers::Header(key, value.c_str(), value.size())); - } else { - Nan::ThrowError("Header value must be a string or buffer"); - } + if (info.Length() > 6 && !info[6].IsUndefined()) { + Napi::Array v8Headers = info[6].As(); + + if (v8Headers.Length() >= 1) { + for (unsigned int i = 0; i < v8Headers.Length(); i++) { + Napi::Object header = (v8Headers).Get(i) + .ToObject(); + if (header.IsEmpty()) { + continue; + } + + Napi::Array props = header.GetPropertyNames(); + + // TODO: Other properties in the list of properties should not be + // ignored, but they are. This is a bug, need to handle it either in JS + // or here. + Napi::MaybeOrValue jsKey = props.Get(Napi::Value::From(env, 0)); + + // The key must be a string. + if (jsKey.IsEmpty()) { + Napi::Error::New(env, "Header key must be a string").ThrowAsJavaScriptException(); + + } + std::string uKey = jsKey.ToString().Utf8Value(); + std::string key(uKey); + + // Valid types for the header are string or buffer. + // Other types will throw an error. + Napi::Value v8Value = + (header).Get(jsKey); + + if (v8Value.IsBuffer()) { + const char* value = v8Value.As>().Data(); + const size_t value_len = v8Value.As>().Length(); + headers.push_back(RdKafka::Headers::Header(key, value, value_len)); + } else if (v8Value.IsString()) { + std::string uValue = v8Value.As().Utf8Value(); + std::string value(uValue); + headers.push_back( + RdKafka::Headers::Header(key, value.c_str(), value.size())); + } else { + Napi::Error::New(env, "Header value must be a string or buffer").ThrowAsJavaScriptException(); + + } } } } - Producer* producer = ObjectWrap::Unwrap(info.This()); // Let the JS library throw if we need to so the error can be more rich int error_code; - if (info[0]->IsString()) { + if (info[0].IsString()) { // Get string pointer for this thing - Nan::Utf8String topicUTF8(Nan::To(info[0]).ToLocalChecked()); - std::string topic_name(*topicUTF8); + std::string topicUTF8 = info[0].ToString().Utf8Value(); + std::string topic_name(topicUTF8); RdKafka::Headers *rd_headers = RdKafka::Headers::create(headers); - Baton b = producer->Produce(message_buffer_data, message_buffer_length, + Baton b = this->Produce(message_buffer_data, message_buffer_length, topic_name, partition, key_buffer_data, key_buffer_length, timestamp, opaque, rd_headers); @@ -661,21 +640,21 @@ NAN_METHOD(Producer::NodeProduce) { } } else { // First parameter is a topic OBJECT - Topic* topic = ObjectWrap::Unwrap(info[0].As()); + Topic* topic = ObjectWrap::Unwrap(info[0].As()); // Unwrap it and turn it into an RdKafka::Topic* - Baton topic_baton = topic->toRDKafkaTopic(producer); + Baton topic_baton = topic->toRDKafkaTopic(this); if (topic_baton.err() != RdKafka::ERR_NO_ERROR) { // Let the JS library throw if we need to so the error can be more rich error_code = static_cast(topic_baton.err()); - return info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } RdKafka::Topic* rd_topic = topic_baton.data(); - Baton b = producer->Produce(message_buffer_data, message_buffer_length, + Baton b = this->Produce(message_buffer_data, 
message_buffer_length, rd_topic, partition, key_buffer_data, key_buffer_length, opaque); // Delete the topic when we are done. @@ -689,8 +668,8 @@ NAN_METHOD(Producer::NodeProduce) { // be a delivery report for it, so we have to clean up the opaque // data now, if there was any. - Nan::Persistent *persistent = - static_cast *>(opaque); + Napi::Reference *persistent = + static_cast *>(opaque); persistent->Reset(); delete persistent; } @@ -699,75 +678,78 @@ NAN_METHOD(Producer::NodeProduce) { delete key; } - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(Producer::NodeSetPartitioner) { - Nan::HandleScope scope; +Napi::Value Producer::NodeSetPartitioner(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsFunction()) { + if (info.Length() < 1 || !info[0].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - Producer* producer = ObjectWrap::Unwrap(info.This()); - v8::Local cb = info[0].As(); - producer->m_partitioner_cb.SetCallback(cb); - info.GetReturnValue().Set(Nan::True()); + Napi::Function cb = info[0].As(); + this->m_partitioner_cb.SetCallback(cb); + return Napi::Value::From(env, true); } -NAN_METHOD(Producer::NodeConnect) { - Nan::HandleScope scope; +Napi::Value Producer::NodeConnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsFunction()) { + if (info.Length() < 1 || !info[0].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } // This needs to be offloaded to libuv - v8::Local cb = info[0].As(); - Nan::Callback *callback = new Nan::Callback(cb); - - Producer* producer = ObjectWrap::Unwrap(info.This()); + Napi::Function cb = info[0].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); // Activate the dispatchers before the connection, as some callbacks may run // on the background thread. // We will deactivate them if the connection fails. 
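// (Aside: the async hand-off pattern used from here on, sketched minimally;
// `SomeWorker` stands in for the Workers::* classes and is illustrative only:
//
//   Napi::FunctionReference* cb = new Napi::FunctionReference();
//   cb->Reset(info[0].As<Napi::Function>());  // pin the JS callback
//   (new SomeWorker(cb, this))->Queue();      // replaces Nan::AsyncQueueWorker
//
// By default a Napi::AsyncWorker deletes itself after OnOK()/OnError() has
// run, so no explicit delete is needed at the call site.)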
- producer->ActivateDispatchers(); + this->ActivateDispatchers(); - Nan::AsyncQueueWorker(new Workers::ProducerConnect(callback, producer)); + (new Workers::ProducerConnect(callback, this))->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodePoll) { - Nan::HandleScope scope; - - Producer* producer = ObjectWrap::Unwrap(info.This()); +Napi::Value Producer::NodePoll(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (!producer->IsConnected()) { - Nan::ThrowError("Producer is disconnected"); + if (!this->IsConnected()) { + Napi::Error::New(env, "Producer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } else { - producer->Poll(); - info.GetReturnValue().Set(Nan::True()); + this->Poll(); + return Napi::Boolean::From(env, true); } } -NAN_METHOD(Producer::NodeSetPollInBackground) { - Nan::HandleScope scope; - if (info.Length() < 1 || !info[0]->IsBoolean()) { +Napi::Value Producer::NodeSetPollInBackground(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + if (info.Length() < 1 || !info[0].IsBoolean()) { // Just throw an exception - return Nan::ThrowError( - "Need to specify a boolean for setting or unsetting"); + Napi::Error::New(env, "Need to specify a boolean for setting or unsetting") + .ThrowAsJavaScriptException(); } - bool set = Nan::To(info[0]).FromJust(); + bool set = info[0].As().Value(); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Baton b = producer->SetPollInBackground(set); + Baton b = this->SetPollInBackground(set); if (b.err() != RdKafka::ERR_NO_ERROR) { - return Nan::ThrowError(b.errstr().c_str()); + Napi::Error::New(env, b.errstr().c_str()).ThrowAsJavaScriptException(); + return env.Null(); } - info.GetReturnValue().Set(b.ToObject()); + return b.ToError(env).Value(); } Baton Producer::Flush(int timeout_ms) { @@ -787,156 +769,176 @@ Baton Producer::Flush(int timeout_ms) { return Baton(response_code); } -NAN_METHOD(Producer::NodeFlush) { - Nan::HandleScope scope; +Napi::Value Producer::NodeFlush(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) { + if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { // Just throw an exception - return Nan::ThrowError("Need to specify a timeout and a callback"); + Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException(); + return env.Null(); } - int timeout_ms = Nan::To(info[0]).FromJust(); + int timeout_ms = info[0].As().Int32Value(); - v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); + Napi::AsyncWorker* worker = new Workers::ProducerFlush(callback, this, timeout_ms); + worker->Queue(); - Nan::AsyncQueueWorker( - new Workers::ProducerFlush(callback, producer, timeout_ms)); - - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodeDisconnect) { - Nan::HandleScope scope; +Napi::Value Producer::NodeDisconnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsFunction()) { + if (info.Length() < 1 || !info[0].IsFunction()) { // Just throw an 
exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local cb = info[0].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[0].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + - Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerDisconnect(callback, producer)); + Napi::AsyncWorker* worker = new Workers::ProducerDisconnect(callback, this); + worker->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodeInitTransactions) { - Nan::HandleScope scope; +Napi::Value Producer::NodeInitTransactions(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) { - return Nan::ThrowError("Need to specify a timeout and a callback"); + if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { + Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException(); + return env.Null(); } - int timeout_ms = Nan::To(info[0]).FromJust(); + int timeout_ms = info[0].As().Int32Value(); - v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker( - new Workers::ProducerInitTransactions(callback, producer, timeout_ms)); + Napi::AsyncWorker* worker = new Workers::ProducerInitTransactions(callback, this, timeout_ms); + worker->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodeBeginTransaction) { - Nan::HandleScope scope; +Napi::Value Producer::NodeBeginTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 1 || !info[0].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local cb = info[0].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[0].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerBeginTransaction(callback, producer)); // NOLINT + Napi::AsyncWorker* worker = new Workers::ProducerBeginTransaction(callback, this); + worker->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodeCommitTransaction) { - Nan::HandleScope scope; +Napi::Value Producer::NodeCommitTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) { - return Nan::ThrowError("Need to specify a timeout and a callback"); + if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { + Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException(); + return env.Null(); } - int timeout_ms = Nan::To(info[0]).FromJust(); + int timeout_ms = info[0].As().Int32Value(); 
- v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker( - new Workers::ProducerCommitTransaction(callback, producer, timeout_ms)); + Napi::AsyncWorker* worker = new Workers::ProducerCommitTransaction(callback, this, timeout_ms); + worker->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodeAbortTransaction) { - Nan::HandleScope scope; +Napi::Value Producer::NodeAbortTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) { - return Nan::ThrowError("Need to specify a timeout and a callback"); + if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { + Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException(); + return env.Null(); } - int timeout_ms = Nan::To(info[0]).FromJust(); + int timeout_ms = info[0].As().Int32Value(); - v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker( - new Workers::ProducerAbortTransaction(callback, producer, timeout_ms)); + Napi::AsyncWorker *worker = + new Workers::ProducerAbortTransaction(callback, this, timeout_ms); + worker->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(Producer::NodeSendOffsetsToTransaction) { - Nan::HandleScope scope; +Napi::Value +Producer::NodeSendOffsetsToTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); + Napi::HandleScope scope(env); if (info.Length() < 4) { - return Nan::ThrowError( + return ThrowError(env, "Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback"); // NOLINT } - if (!info[0]->IsArray()) { - return Nan::ThrowError( + if (!info[0].IsArray()) { + return ThrowError(env, "First argument to 'send offsets to transaction' has to be a consumer object"); // NOLINT } - if (!info[1]->IsObject()) { - Nan::ThrowError("Kafka consumer must be provided"); + if (!info[1].IsObject()) { + return ThrowError(env, "Kafka consumer must be provided"); + } - if (!info[2]->IsNumber()) { - Nan::ThrowError("Timeout must be provided"); + if (!info[2].IsNumber()) { + return ThrowError(env, "Timeout must be provided"); + } - if (!info[3]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (!info[3].IsFunction()) { + return ThrowError(env, "Need to specify a callback"); } - std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); - NodeKafka::KafkaConsumer* consumer = - ObjectWrap::Unwrap(info[1].As()); - int timeout_ms = Nan::To(info[2]).FromJust(); - v8::Local cb = info[3].As(); - Nan::Callback *callback = new Nan::Callback(cb); + std::vector toppars = + Conversion::TopicPartition::FromV8Array(info[0].As()); + + NodeKafka::KafkaConsumer *consumer = + ObjectWrap::Unwrap(info[1].As()); + + int timeout_ms = info[2].As().Int32Value(); + Napi::Function cb = info[3].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + Producer *producer = this; - Producer* 
producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerSendOffsetsToTransaction( - callback, - producer, - toppars, - consumer, - timeout_ms)); + Napi::AsyncWorker *worker = new Workers::ProducerSendOffsetsToTransaction( + callback, producer, toppars, consumer, timeout_ms); + worker->Queue(); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } } // namespace NodeKafka diff --git a/src/producer.h b/src/producer.h index 8df138e8..48db04dc 100644 --- a/src/producer.h +++ b/src/producer.h @@ -10,8 +10,10 @@ #ifndef SRC_PRODUCER_H_ #define SRC_PRODUCER_H_ -#include -#include +#include +#include +#include +#include #include #include #include @@ -27,7 +29,7 @@ namespace NodeKafka { class ProducerMessage { public: - explicit ProducerMessage(v8::Local, NodeKafka::Topic*); + explicit ProducerMessage(Napi::Object, NodeKafka::Topic*); ~ProducerMessage(); void* Payload(); @@ -47,10 +49,9 @@ class ProducerMessage { bool m_is_empty; }; -class Producer : public Connection { +class Producer : public Connection { public: - static void Init(v8::Local); - static v8::Local NewInstance(v8::Local); + static void Init(const Napi::Env&, Napi::Object); Baton Connect(); void Disconnect(); @@ -80,8 +81,8 @@ class Producer : public Connection { void ActivateDispatchers(); void DeactivateDispatchers(); - void ConfigureCallback(const std::string& string_key, - const v8::Local& cb, bool add) override; + // void ConfigureCallback(const std::string& string_key, + // const Napi::Function& cb, bool add) override; Baton InitTransactions(int32_t timeout_ms); Baton BeginTransaction(); @@ -93,27 +94,27 @@ class Producer : public Connection { int timeout_ms); protected: - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo&); + static Napi::FunctionReference constructor; + static void New(const Napi::CallbackInfo&); - Producer(Conf*, Conf*); + Producer(const Napi::CallbackInfo& info); ~Producer(); private: - static NAN_METHOD(NodeProduce); - static NAN_METHOD(NodeSetPartitioner); - static NAN_METHOD(NodeConnect); - static NAN_METHOD(NodeDisconnect); - static NAN_METHOD(NodePoll); - static NAN_METHOD(NodeSetPollInBackground); + Napi::Value NodeProduce(const Napi::CallbackInfo& info); + Napi::Value NodeSetPartitioner(const Napi::CallbackInfo& info); + Napi::Value NodeConnect(const Napi::CallbackInfo& info); + Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + Napi::Value NodePoll(const Napi::CallbackInfo& info); + Napi::Value NodeSetPollInBackground(const Napi::CallbackInfo& info); #if RD_KAFKA_VERSION > 0x00090200 - static NAN_METHOD(NodeFlush); + Napi::Value NodeFlush(const Napi::CallbackInfo& info); #endif - static NAN_METHOD(NodeInitTransactions); - static NAN_METHOD(NodeBeginTransaction); - static NAN_METHOD(NodeCommitTransaction); - static NAN_METHOD(NodeAbortTransaction); - static NAN_METHOD(NodeSendOffsetsToTransaction); + Napi::Value NodeInitTransactions(const Napi::CallbackInfo& info); + Napi::Value NodeBeginTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeCommitTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeAbortTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeSendOffsetsToTransaction(const Napi::CallbackInfo& info); Callbacks::Delivery m_dr_cb; Callbacks::Partitioner m_partitioner_cb; diff --git a/src/topic.cc b/src/topic.cc index 78653c41..450d5dd6 100644 --- a/src/topic.cc +++ b/src/topic.cc @@ -8,9 +8,7 @@ */ #include -#include -#include "src/common.h" #include "src/connection.h" 
#include "src/topic.h" @@ -29,10 +27,11 @@ namespace NodeKafka { * @sa NodeKafka::Connection */ -Topic::Topic(std::string topic_name, RdKafka::Conf* config): - m_topic_name(topic_name), - m_config(config) { +void Topic::Setup(std::string topic_name, RdKafka::Conf *config) { + m_topic_name = topic_name; + // We probably want to copy the config. May require refactoring if we do not + m_config = config; } Topic::~Topic() { @@ -45,7 +44,7 @@ std::string Topic::name() { return m_topic_name; } -Baton Topic::toRDKafkaTopic(Connection* handle) { +template Baton Topic::toRDKafkaTopic(Connection* handle) { if (m_config) { return handle->CreateTopic(m_topic_name, m_config); } else { @@ -74,99 +73,80 @@ Baton offset_store (int32_t partition, int64_t offset) { */ -Nan::Persistent Topic::constructor; - -void Topic::Init(v8::Local exports) { - Nan::HandleScope scope; +Napi::FunctionReference Topic::constructor; - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("Topic").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); +void Topic::Init(const Napi::Env &env, Napi::Object exports) { + Napi::HandleScope scope(env); - Nan::SetPrototypeMethod(tpl, "name", NodeGetName); + Napi::Function Topic = DefineClass(env, "Topic", { + InstanceMethod("name", &Topic::NodeGetName), + }); // connect. disconnect. resume. pause. get meta data - constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) - .ToLocalChecked()); + constructor.Reset(Topic); - Nan::Set(exports, Nan::New("Topic").ToLocalChecked(), - tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked()); + exports.Set(Napi::String::New(env, "Topic"), Topic); } -void Topic::New(const Nan::FunctionCallbackInfo& info) { +Topic::Topic(const Napi::CallbackInfo &info): ObjectWrap(info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return; } if (info.Length() < 1) { - return Nan::ThrowError("topic name is required"); + Napi::Error::New(env, "topic name is required").ThrowAsJavaScriptException(); + return; } - if (!info[0]->IsString()) { - return Nan::ThrowError("Topic name must be a string"); + if (!info[0].IsString()) { + Napi::Error::New(env, "Topic name must be a string").ThrowAsJavaScriptException(); + return; } RdKafka::Conf* config = NULL; - if (info.Length() >= 2 && !info[1]->IsUndefined() && !info[1]->IsNull()) { + if (info.Length() >= 2 && !info[1].IsUndefined() && !info[1].IsNull()) { // If they gave us two parameters, or the 3rd parameter is null or // undefined, we want to pass null in for the config std::string errstr; - if (!info[1]->IsObject()) { - return Nan::ThrowError("Configuration data must be specified"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Configuration data must be specified").ThrowAsJavaScriptException(); + return; } - config = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); // NOLINT + config = Conf::create(RdKafka::Conf::CONF_TOPIC, info[1].ToObject(), errstr); // NOLINT if (!config) { - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; } } - Nan::Utf8String parameterValue(Nan::To(info[0]).ToLocalChecked()); - std::string topic_name(*parameterValue); - - Topic* topic = new Topic(topic_name, config); - - // Wrap it - topic->Wrap(info.This()); - - // Then there is some weird 
initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it - - info.GetReturnValue().Set(info.This()); -} - -// handle - -v8::Local Topic::NewInstance(v8::Local arg) { - Nan::EscapableHandleScope scope; - - const unsigned argc = 1; - - v8::Local argv[argc] = { arg }; - v8::Local cons = Nan::New(constructor); - v8::Local instance = - Nan::NewInstance(cons, argc, argv).ToLocalChecked(); + std::string parameterValue = info[0].ToString(); + std::string topic_name(parameterValue); - return scope.Escape(instance); + this->Setup(topic_name, config); } -NAN_METHOD(Topic::NodeGetName) { - Nan::HandleScope scope; +Napi::Value Topic::NodeGetName(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - Topic* topic = ObjectWrap::Unwrap(info.This()); + Topic* topic = this; - info.GetReturnValue().Set(Nan::New(topic->name()).ToLocalChecked()); + return Napi::String::From(env, this->name()); } -NAN_METHOD(Topic::NodePartitionAvailable) { +Napi::Value Topic::NodePartitionAvailable(const Napi::CallbackInfo &info) { + return info.Env().Null(); // @TODO(sparente) } -NAN_METHOD(Topic::NodeOffsetStore) { +Napi::Value Topic::NodeOffsetStore(const Napi::CallbackInfo& info) { + return info.Env().Null(); // @TODO(sparente) } diff --git a/src/topic.h b/src/topic.h index d487d089..d2de59a9 100644 --- a/src/topic.h +++ b/src/topic.h @@ -10,27 +10,28 @@ #ifndef SRC_TOPIC_H_ #define SRC_TOPIC_H_ -#include +#include +#include #include #include "rdkafkacpp.h" // NOLINT #include "src/config.h" +#include "src/connection.h" namespace NodeKafka { -class Topic : public Nan::ObjectWrap { +class Topic : public Napi::ObjectWrap { public: - static void Init(v8::Local); - static v8::Local NewInstance(v8::Local arg); + static void Init(const Napi::Env &, Napi::Object); + Topic(const Napi::CallbackInfo& info); - Baton toRDKafkaTopic(Connection *handle); + template Baton toRDKafkaTopic(Connection *handle); protected: - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo& info); + static Napi::FunctionReference constructor; - static NAN_METHOD(NodeGetMetadata); + Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info); // TopicConfig * config_; @@ -38,15 +39,15 @@ class Topic : public Nan::ObjectWrap { std::string name(); private: - Topic(std::string, RdKafka::Conf *); + void Setup(std::string, RdKafka::Conf *); ~Topic(); std::string m_topic_name; RdKafka::Conf * m_config; - static NAN_METHOD(NodeGetName); - static NAN_METHOD(NodePartitionAvailable); - static NAN_METHOD(NodeOffsetStore); + Napi::Value NodeGetName(const Napi::CallbackInfo& info); + Napi::Value NodePartitionAvailable(const Napi::CallbackInfo& info); + Napi::Value NodeOffsetStore(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/workers.cc b/src/workers.cc index 4655458d..b20fe7fc 100644 --- a/src/workers.cc +++ b/src/workers.cc @@ -36,10 +36,10 @@ namespace Handle { * @see RdKafka::KafkaConsumer::Committed */ -OffsetsForTimes::OffsetsForTimes(Nan::Callback *callback, - Connection* handle, - std::vector & t, - const int & timeout_ms) : +OffsetsForTimes::OffsetsForTimes(Napi::FunctionReference *callback, + Connection* handle, + std::vector & t, + const int & timeout_ms) : ErrorAwareWorker(callback), m_handle(handle), m_topic_partitions(t), @@ -57,30 +57,30 @@ void OffsetsForTimes::Execute() { } } -void OffsetsForTimes::HandleOKCallback() { - Nan::HandleScope scope; +void OffsetsForTimes::OnOK() { + 
Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::TopicPartition::ToV8Array(m_topic_partitions); callback->Call(argc, argv); } -void OffsetsForTimes::HandleErrorCallback() { - Nan::HandleScope scope; +void OffsetsForTimes::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } } // namespace Handle ConnectionMetadata::ConnectionMetadata( - Nan::Callback *callback, Connection* connection, + Napi::FunctionReference *callback, Connection* connection, std::string topic, int timeout_ms, bool all_topics) : ErrorAwareWorker(callback), m_connection(connection), @@ -104,13 +104,13 @@ void ConnectionMetadata::Execute() { } } -void ConnectionMetadata::HandleOKCallback() { - Nan::HandleScope scope; +void ConnectionMetadata::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; // This is a big one! - v8::Local argv[argc] = { Nan::Null(), + Napi::Value argv[argc] = { env.Null(), Conversion::Metadata::ToV8Object(m_metadata)}; callback->Call(argc, argv); @@ -118,11 +118,11 @@ void ConnectionMetadata::HandleOKCallback() { delete m_metadata; } -void ConnectionMetadata::HandleErrorCallback() { - Nan::HandleScope scope; +void ConnectionMetadata::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -130,14 +130,14 @@ void ConnectionMetadata::HandleErrorCallback() { /** * @brief Client query watermark offsets worker * - * Easy Nan::AsyncWorker for getting watermark offsets from a broker + * Easy Napi::AsyncWorker for getting watermark offsets from a broker * * @sa RdKafka::Handle::query_watermark_offsets * @sa NodeKafka::Connection::QueryWatermarkOffsets */ ConnectionQueryWatermarkOffsets::ConnectionQueryWatermarkOffsets( - Nan::Callback *callback, Connection* connection, + Napi::FunctionReference *callback, Connection* connection, std::string topic, int32_t partition, int timeout_ms) : ErrorAwareWorker(callback), m_connection(connection), @@ -157,28 +157,28 @@ void ConnectionQueryWatermarkOffsets::Execute() { } } -void ConnectionQueryWatermarkOffsets::HandleOKCallback() { - Nan::HandleScope scope; +void ConnectionQueryWatermarkOffsets::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local offsetsObj = Nan::New(); - Nan::Set(offsetsObj, Nan::New("lowOffset").ToLocalChecked(), - Nan::New(m_low_offset)); - Nan::Set(offsetsObj, Nan::New("highOffset").ToLocalChecked(), - Nan::New(m_high_offset)); + Napi::Object offsetsObj = Napi::Object::New(env); + (offsetsObj).Set(Napi::String::New(env, "lowOffset"), + Napi::Number::New(env, m_low_offset)); + (offsetsObj).Set(Napi::String::New(env, "highOffset"), + Napi::Number::New(env, m_high_offset)); // This is a big one! 
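// (Aside: every worker in this file keeps the node-style (err, result)
// callback convention: the OK path invokes cb(null, result), the error path
// invokes cb(err). With stock node-addon-api the same shape would read
// roughly as below; this codebase instead routes it through its own
// ErrorAwareWorker base, so `env`, `callback`, and GetErrorObject() are
// repo-local and `someResult` is illustrative:
//
//   void OnOK() override {
//     Callback().Call({Env().Null(), someResult});  // cb(null, result)
//   }
//   void OnError(const Napi::Error& e) override {
//     Callback().Call({e.Value()});                 // cb(err)
//   }
// )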
@@ -130,14 +130,14 @@ void ConnectionMetadata::HandleErrorCallback() {
 /**
  * @brief Client query watermark offsets worker
  *
- * Easy Nan::AsyncWorker for getting watermark offsets from a broker
+ * Easy Napi::AsyncWorker for getting watermark offsets from a broker
  *
  * @sa RdKafka::Handle::query_watermark_offsets
  * @sa NodeKafka::Connection::QueryWatermarkOffsets
 */
 
 ConnectionQueryWatermarkOffsets::ConnectionQueryWatermarkOffsets(
-    Nan::Callback *callback, Connection* connection,
+    Napi::FunctionReference *callback, Connection* connection,
     std::string topic, int32_t partition, int timeout_ms) :
   ErrorAwareWorker(callback),
   m_connection(connection),
@@ -157,28 +157,28 @@ void ConnectionQueryWatermarkOffsets::Execute() {
   }
 }
 
-void ConnectionQueryWatermarkOffsets::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ConnectionQueryWatermarkOffsets::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 2;
-  v8::Local<v8::Object> offsetsObj = Nan::New<v8::Object>();
-  Nan::Set(offsetsObj, Nan::New("lowOffset").ToLocalChecked(),
-    Nan::New<v8::Number>(m_low_offset));
-  Nan::Set(offsetsObj, Nan::New("highOffset").ToLocalChecked(),
-    Nan::New<v8::Number>(m_high_offset));
+  Napi::Object offsetsObj = Napi::Object::New(env);
+  offsetsObj.Set(Napi::String::New(env, "lowOffset"),
+                 Napi::Number::New(env, m_low_offset));
+  offsetsObj.Set(Napi::String::New(env, "highOffset"),
+                 Napi::Number::New(env, m_high_offset));
 
   // This is a big one!
-  v8::Local<v8::Value> argv[argc] = { Nan::Null(), offsetsObj};
+  Napi::Value argv[argc] = { env.Null(), offsetsObj};
 
   callback->Call(argc, argv);
 }
 
-void ConnectionQueryWatermarkOffsets::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ConnectionQueryWatermarkOffsets::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { GetErrorObject() };
+  Napi::Value argv[argc] = { GetErrorObject(env).Value() };
 
   callback->Call(argc, argv);
 }
 
@@ -186,13 +186,13 @@ void ConnectionQueryWatermarkOffsets::HandleErrorCallback() {
 /**
  * @brief Producer connect worker.
  *
- * Easy Nan::AsyncWorker for setting up client connections
+ * Easy Napi::AsyncWorker for setting up client connections
  *
  * @sa RdKafka::Producer::connect
  * @sa NodeKafka::Producer::Connect
 */
 
-ProducerConnect::ProducerConnect(Nan::Callback *callback, Producer* producer):
+ProducerConnect::ProducerConnect(Napi::FunctionReference *callback, Producer* producer):
   ErrorAwareWorker(callback),
   producer(producer) {}
 
@@ -206,27 +206,27 @@ void ProducerConnect::Execute() {
   }
 }
 
-void ProducerConnect::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerConnect::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 2;
 
-  v8::Local<v8::Object> obj = Nan::New<v8::Object>();
-  Nan::Set(obj, Nan::New("name").ToLocalChecked(),
-    Nan::New(producer->Name()).ToLocalChecked());
+  Napi::Object obj = Napi::Object::New(env);
+  obj.Set(Napi::String::New(env, "name"),
+          Napi::String::New(env, producer->Name()));
 
-  v8::Local<v8::Value> argv[argc] = { Nan::Null(), obj};
+  Napi::Value argv[argc] = { env.Null(), obj};
 
   callback->Call(argc, argv);
 }
 
-void ProducerConnect::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ProducerConnect::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   producer->DeactivateDispatchers();
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { GetErrorObject() };
+  Napi::Value argv[argc] = { GetErrorObject(env).Value() };
 
   callback->Call(argc, argv);
 }
 
@@ -234,10 +234,10 @@ void ProducerConnect::HandleErrorCallback() {
 /**
  * @brief Producer disconnect worker
  *
- * Easy Nan::AsyncWorker for disconnecting from clients
+ * Easy Napi::AsyncWorker for disconnecting from clients
 */
 
-ProducerDisconnect::ProducerDisconnect(Nan::Callback *callback,
+ProducerDisconnect::ProducerDisconnect(Napi::FunctionReference *callback,
   Producer* producer):
   ErrorAwareWorker(callback),
   producer(producer) {}
 
@@ -248,11 +248,11 @@ void ProducerDisconnect::Execute() {
   producer->Disconnect();
 }
 
-void ProducerDisconnect::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerDisconnect::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 2;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null(), Nan::True()};
+  Napi::Value argv[argc] = { env.Null(), Napi::Boolean::New(env, true)};
 
   // Deactivate the dispatchers
   producer->DeactivateDispatchers();
@@ -260,7 +260,7 @@ void ProducerDisconnect::HandleOKCallback() {
 
   callback->Call(argc, argv);
 }
 
-void ProducerDisconnect::HandleErrorCallback() {
+void ProducerDisconnect::OnError() {
   // This should never run
   assert(0);
 }
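One node-addon-api detail worth keeping in mind when reading the conversions above: values are built through per-type factories. There is no generic Napi::New(env, v), and booleans come from Napi::Boolean::New(env, b) rather than a method on Napi::Env. The small result payloads these workers hand to their callbacks reduce to the following (helper name illustrative):

    #include <napi.h>
    #include <string>

    // Build a small result object for an (err, result) callback pair.
    Napi::Object MakeConnectResult(Napi::Env env, const std::string& name) {
      Napi::Object obj = Napi::Object::New(env);
      obj.Set("name", Napi::String::New(env, name));        // string property
      obj.Set("connected", Napi::Boolean::New(env, true));  // boolean property
      return obj;
    }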
@@ -268,10 +268,10 @@ void ProducerDisconnect::HandleErrorCallback() {
 /**
  * @brief Producer flush worker
  *
- * Easy Nan::AsyncWorker for flushing a producer.
+ * Easy Napi::AsyncWorker for flushing a producer.
 */
 
-ProducerFlush::ProducerFlush(Nan::Callback *callback,
+ProducerFlush::ProducerFlush(Napi::FunctionReference *callback,
   Producer* producer,
   int timeout_ms):
   ErrorAwareWorker(callback),
   producer(producer),
@@ -291,11 +291,11 @@ void ProducerFlush::Execute() {
   }
 }
 
-void ProducerFlush::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerFlush::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null() };
+  Napi::Value argv[argc] = { env.Null() };
 
   callback->Call(argc, argv);
 }
 
@@ -303,13 +303,13 @@ void ProducerFlush::HandleOKCallback() {
 /**
  * @brief Producer init transactions worker.
  *
- * Easy Nan::AsyncWorker for initiating transactions
+ * Easy Napi::AsyncWorker for initiating transactions
  *
  * @sa RdKafka::Producer::init_transactions
  * @sa NodeKafka::Producer::InitTransactions
 */
 
-ProducerInitTransactions::ProducerInitTransactions(Nan::Callback *callback,
+ProducerInitTransactions::ProducerInitTransactions(Napi::FunctionReference *callback,
   Producer* producer,
   const int & timeout_ms):
   ErrorAwareWorker(callback),
   producer(producer),
@@ -325,11 +325,11 @@ void ProducerInitTransactions::Execute() {
   }
 }
 
-void ProducerInitTransactions::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerInitTransactions::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null() };
+  Napi::Value argv[argc] = { env.Null() };
 
   // Activate the dispatchers
   producer->ActivateDispatchers();
@@ -337,11 +337,11 @@ void ProducerInitTransactions::HandleOKCallback() {
 
   callback->Call(argc, argv);
 }
 
-void ProducerInitTransactions::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ProducerInitTransactions::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { m_baton.ToTxnObject() };
+  Napi::Value argv[argc] = { m_baton.ToTxnObject() };
 
   callback->Call(argc, argv);
 }
 
@@ -349,14 +349,14 @@ void ProducerInitTransactions::HandleErrorCallback() {
 /**
  * @brief Producer begin transaction worker.
  *
- * Easy Nan::AsyncWorker for begin transaction
+ * Easy Napi::AsyncWorker for beginning a transaction
  *
  * @sa RdKafka::Producer::begin_transaction
  * @sa NodeKafka::Producer::BeginTransaction
 */
 
-ProducerBeginTransaction::ProducerBeginTransaction(Nan::Callback* callback,
-                                                   Producer* producer)
+ProducerBeginTransaction::ProducerBeginTransaction(Napi::FunctionReference* callback,
+                                                   Producer* producer)
     : ErrorAwareWorker(callback), producer(producer) {}
 
 ProducerBeginTransaction::~ProducerBeginTransaction() {}
 
@@ -369,12 +369,12 @@ void ProducerBeginTransaction::Execute() {
   }
 }
 
-void ProducerBeginTransaction::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerBeginTransaction::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
 
-  v8::Local<v8::Value> argv[argc] = { Nan::Null() };
+  Napi::Value argv[argc] = { env.Null() };
 
   // Activate the dispatchers
   producer->ActivateDispatchers();
@@ -382,11 +382,11 @@ void ProducerBeginTransaction::HandleOKCallback() {
 
   callback->Call(argc, argv);
 }
 
-void ProducerBeginTransaction::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ProducerBeginTransaction::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { GetErrorObject() };
+  Napi::Value argv[argc] = { GetErrorObject(env).Value() };
 
   callback->Call(argc, argv);
 }
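For context on how workers like the ones above are driven: the binding method constructs the worker and calls Queue(), after which libuv owns its lifetime. A sketch of such a call site, reusing the hypothetical SketchWorker from the earlier sketch (DoubleAsync is illustrative, not an export of this PR):

    #include <napi.h>

    Napi::Value DoubleAsync(const Napi::CallbackInfo& info) {
      Napi::Env env = info.Env();
      if (info.Length() < 2 || !info[1].IsFunction()) {
        Napi::TypeError::New(env, "callback required")
            .ThrowAsJavaScriptException();
        return env.Undefined();
      }
      int input = info[0].ToNumber().Int32Value();
      // The worker deletes itself once OnOK()/OnError() has run.
      auto* worker = new SketchWorker(info[1].As<Napi::Function>(), input);
      worker->Queue();
      return env.Undefined();
    }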
@@ -394,13 +394,13 @@ void ProducerBeginTransaction::HandleErrorCallback() {
 /**
  * @brief Producer commit transaction worker.
  *
- * Easy Nan::AsyncWorker for committing transaction
+ * Easy Napi::AsyncWorker for committing a transaction
  *
  * @sa RdKafka::Producer::commit_transaction
  * @sa NodeKafka::Producer::CommitTransaction
 */
 
-ProducerCommitTransaction::ProducerCommitTransaction(Nan::Callback *callback,
+ProducerCommitTransaction::ProducerCommitTransaction(Napi::FunctionReference *callback,
   Producer* producer,
   const int & timeout_ms):
   ErrorAwareWorker(callback),
   producer(producer),
@@ -416,11 +416,11 @@ void ProducerCommitTransaction::Execute() {
   }
 }
 
-void ProducerCommitTransaction::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerCommitTransaction::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null() };
+  Napi::Value argv[argc] = { env.Null() };
 
   // Activate the dispatchers
   producer->ActivateDispatchers();
@@ -428,11 +428,11 @@ void ProducerCommitTransaction::HandleOKCallback() {
 
   callback->Call(argc, argv);
 }
 
-void ProducerCommitTransaction::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ProducerCommitTransaction::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { m_baton.ToTxnObject() };
+  Napi::Value argv[argc] = { m_baton.ToTxnObject() };
 
   callback->Call(argc, argv);
 }
 
@@ -440,13 +440,13 @@ void ProducerCommitTransaction::HandleErrorCallback() {
 /**
  * @brief Producer abort transaction worker.
  *
- * Easy Nan::AsyncWorker for aborting transaction
+ * Easy Napi::AsyncWorker for aborting a transaction
  *
  * @sa RdKafka::Producer::abort_transaction
  * @sa NodeKafka::Producer::AbortTransaction
 */
 
-ProducerAbortTransaction::ProducerAbortTransaction(Nan::Callback *callback,
+ProducerAbortTransaction::ProducerAbortTransaction(Napi::FunctionReference *callback,
   Producer* producer,
   const int & timeout_ms):
   ErrorAwareWorker(callback),
   producer(producer),
@@ -462,11 +462,11 @@ void ProducerAbortTransaction::Execute() {
   }
 }
 
-void ProducerAbortTransaction::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerAbortTransaction::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null() };
+  Napi::Value argv[argc] = { env.Null() };
 
   // Activate the dispatchers
   producer->ActivateDispatchers();
@@ -474,11 +474,11 @@ void ProducerAbortTransaction::HandleOKCallback() {
 
   callback->Call(argc, argv);
 }
 
-void ProducerAbortTransaction::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ProducerAbortTransaction::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { m_baton.ToTxnObject() };
+  Napi::Value argv[argc] = { m_baton.ToTxnObject() };
 
   callback->Call(argc, argv);
 }
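The error paths above ultimately surface a JS Error that carries a numeric code (see ErrorAwareWorker in workers.h further down, which sets a `code` property on the error it forwards). The mechanism is plain property assignment on the error's backing object; a minimal sketch (helper name illustrative):

    #include <napi.h>
    #include <string>

    // Attach a numeric `code` to a JS error before handing it to a callback.
    Napi::Error MakeCodedError(Napi::Env env, const std::string& msg, int code) {
      Napi::Error err = Napi::Error::New(env, msg);
      err.Value().Set("code", Napi::Number::New(env, code));
      return err;
    }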
@@ -486,14 +486,14 @@ void ProducerAbortTransaction::HandleErrorCallback() {
 /**
  * @brief Producer SendOffsetsToTransaction worker.
  *
- * Easy Nan::AsyncWorker for SendOffsetsToTransaction
+ * Easy Napi::AsyncWorker for SendOffsetsToTransaction
  *
  * @sa RdKafka::Producer::send_offsets_to_transaction
  * @sa NodeKafka::Producer::SendOffsetsToTransaction
 */
 
 ProducerSendOffsetsToTransaction::ProducerSendOffsetsToTransaction(
-    Nan::Callback *callback,
+    Napi::FunctionReference *callback,
     Producer* producer,
     std::vector<RdKafka::TopicPartition*> & t,
     KafkaConsumer* consumer,
@@ -508,18 +508,18 @@ ProducerSendOffsetsToTransaction::~ProducerSendOffsetsToTransaction() {}
 
 void ProducerSendOffsetsToTransaction::Execute() {
   Baton b = producer->SendOffsetsToTransaction(m_topic_partitions, consumer,
-    m_timeout_ms);
+                                               m_timeout_ms);
   if (b.err() != RdKafka::ERR_NO_ERROR) {
     SetErrorBaton(b);
   }
 }
 
-void ProducerSendOffsetsToTransaction::HandleOKCallback() {
-  Nan::HandleScope scope;
+void ProducerSendOffsetsToTransaction::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null() };
+  Napi::Value argv[argc] = { env.Null() };
 
   // Activate the dispatchers
   producer->ActivateDispatchers();
@@ -527,11 +527,11 @@ void ProducerSendOffsetsToTransaction::HandleOKCallback() {
 
   callback->Call(argc, argv);
 }
 
-void ProducerSendOffsetsToTransaction::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void ProducerSendOffsetsToTransaction::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { m_baton.ToTxnObject() };
+  Napi::Value argv[argc] = { m_baton.ToTxnObject() };
 
   callback->Call(argc, argv);
 }
 
@@ -539,13 +539,13 @@ void ProducerSendOffsetsToTransaction::HandleErrorCallback() {
 /**
  * @brief KafkaConsumer connect worker.
  *
- * Easy Nan::AsyncWorker for setting up client connections
+ * Easy Napi::AsyncWorker for setting up client connections
  *
  * @sa RdKafka::KafkaConsumer::connect
 * @sa NodeKafka::KafkaConsumer::Connect
 */
 
-KafkaConsumerConnect::KafkaConsumerConnect(Nan::Callback *callback,
+KafkaConsumerConnect::KafkaConsumerConnect(Napi::FunctionReference *callback,
   KafkaConsumer* consumer):
   ErrorAwareWorker(callback),
   consumer(consumer) {}
 
@@ -561,28 +561,28 @@ void KafkaConsumerConnect::Execute() {
   }
 }
 
-void KafkaConsumerConnect::HandleOKCallback() {
-  Nan::HandleScope scope;
+void KafkaConsumerConnect::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 2;
 
   // Create the object
-  v8::Local<v8::Object> obj = Nan::New<v8::Object>();
-  Nan::Set(obj, Nan::New("name").ToLocalChecked(),
-    Nan::New(consumer->Name()).ToLocalChecked());
+  Napi::Object obj = Napi::Object::New(env);
+  obj.Set(Napi::String::New(env, "name"),
+          Napi::String::New(env, consumer->Name()));
 
-  v8::Local<v8::Value> argv[argc] = { Nan::Null(), obj };
+  Napi::Value argv[argc] = { env.Null(), obj };
 
   callback->Call(argc, argv);
 }
 
-void KafkaConsumerConnect::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void KafkaConsumerConnect::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   consumer->DeactivateDispatchers();
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { Nan::Error(ErrorMessage()) };
+  Napi::Value argv[argc] = { Napi::Error::New(env, ErrorMessage()).Value() };
 
   callback->Call(argc, argv);
 }
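A note relevant to the consume loop just below: MessageWorker keeps its hand-rolled uv_async_t plumbing to push messages from the poll thread into JS. node-addon-api also ships Napi::ThreadSafeFunction for this pattern; it is not used by this PR, but for comparison, a minimal sketch (StartFeed and the string payload are illustrative):

    #include <napi.h>
    #include <thread>

    // Deliver a value to a JS callback from a non-JS thread.
    void StartFeed(const Napi::CallbackInfo& info) {
      Napi::Env env = info.Env();
      Napi::ThreadSafeFunction tsfn = Napi::ThreadSafeFunction::New(
          env, info[0].As<Napi::Function>(), "feed",
          0 /* unlimited queue */, 1 /* one producer thread */);
      std::thread([tsfn]() mutable {
        tsfn.BlockingCall([](Napi::Env env, Napi::Function cb) {
          cb.Call({Napi::String::New(env, "message")});
        });
        tsfn.Release();  // let Node exit once the queue drains
      }).detach();
    }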
@@ -590,13 +590,13 @@ void KafkaConsumerConnect::HandleErrorCallback() {
 /**
  * @brief KafkaConsumer disconnect worker.
  *
- * Easy Nan::AsyncWorker for disconnecting and cleaning up librdkafka artifacts
+ * Easy Napi::AsyncWorker for disconnecting and cleaning up librdkafka artifacts
  *
  * @sa RdKafka::KafkaConsumer::disconnect
 * @sa NodeKafka::KafkaConsumer::Disconnect
 */
 
-KafkaConsumerDisconnect::KafkaConsumerDisconnect(Nan::Callback *callback,
+KafkaConsumerDisconnect::KafkaConsumerDisconnect(Napi::FunctionReference *callback,
   KafkaConsumer* consumer):
   ErrorAwareWorker(callback),
   consumer(consumer) {}
 
@@ -611,22 +611,22 @@ void KafkaConsumerDisconnect::Execute() {
   }
 }
 
-void KafkaConsumerDisconnect::HandleOKCallback() {
-  Nan::HandleScope scope;
+void KafkaConsumerDisconnect::OnOK() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 2;
-  v8::Local<v8::Value> argv[argc] = { Nan::Null(), Nan::True() };
+  Napi::Value argv[argc] = { env.Null(), Napi::Boolean::New(env, true) };
 
   consumer->DeactivateDispatchers();
 
   callback->Call(argc, argv);
 }
 
-void KafkaConsumerDisconnect::HandleErrorCallback() {
-  Nan::HandleScope scope;
+void KafkaConsumerDisconnect::OnError() {
+  Napi::Env env = Env();
+  Napi::HandleScope scope(env);
 
   const unsigned int argc = 1;
-  v8::Local<v8::Value> argv[argc] = { GetErrorObject() };
+  Napi::Value argv[argc] = { GetErrorObject(env).Value() };
 
   consumer->DeactivateDispatchers();
 
@@ -636,7 +636,7 @@ void KafkaConsumerDisconnect::HandleErrorCallback() {
 /**
  * @brief KafkaConsumer get messages worker.
  *
- * A more complex Nan::AsyncProgressWorker. I made a custom superclass to deal
+ * A more complex Napi::AsyncProgressWorker. I made a custom superclass to deal
  * with more real-time progress points. Instead of using ProgressWorker, which
  * is not time sensitive, this custom worker will poll using libuv and send
  * data back to v8 as it comes available without missing any
@@ -654,17 +654,17 @@ void KafkaConsumerDisconnect::HandleErrorCallback() {
 * @sa NodeKafka::KafkaConsumer::GetMessage
 */
 
-KafkaConsumerConsumeLoop::KafkaConsumerConsumeLoop(Nan::Callback *callback,
-                                                   KafkaConsumer* consumer,
-                                                   const int & timeout_ms,
-                                                   const int & timeout_sleep_delay_ms) :
+KafkaConsumerConsumeLoop::KafkaConsumerConsumeLoop(Napi::FunctionReference *callback,
+                                                   KafkaConsumer* consumer,
+                                                   const int & timeout_ms,
+                                                   const int & timeout_sleep_delay_ms) :
   MessageWorker(callback),
   consumer(consumer),
   m_looping(true),
   m_timeout_ms(timeout_ms),
   m_timeout_sleep_delay_ms(timeout_sleep_delay_ms) {
     uv_thread_create(&thread_event_loop, KafkaConsumerConsumeLoop::ConsumeLoop,
-      reinterpret_cast<void*>(this));
+                     reinterpret_cast<void*>(this));
 }
 
 KafkaConsumerConsumeLoop::~KafkaConsumerConsumeLoop() {}
 
@@ -691,35 +691,35 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void* arg) {
     if (ec == RdKafka::ERR_NO_ERROR) {
       RdKafka::Message *message = b.data();
       switch (message->err()) {
-      case RdKafka::ERR__PARTITION_EOF:
-        bus.Send(message);
-        break;
-
-      case RdKafka::ERR__TIMED_OUT:
-      case RdKafka::ERR__TIMED_OUT_QUEUE:
-        delete message;
-        if (consumerLoop->m_timeout_sleep_delay_ms > 0) {
-          // If it is timed out this could just mean there were no
-          // new messages fetched quickly enough. This isn't really
-          // an error that should kill us.
-          #ifndef _WIN32
-          usleep(consumerLoop->m_timeout_sleep_delay_ms*1000);
-          #else
-          _sleep(consumerLoop->m_timeout_sleep_delay_ms);
-          #endif
-        }
-        break;
-      case RdKafka::ERR_NO_ERROR:
-        bus.Send(message);
-        break;
-      default:
-        // Unknown error.
We need to break out of this - consumerLoop->SetErrorBaton(b); - consumerLoop->m_looping = false; - break; - } + case RdKafka::ERR__PARTITION_EOF: + bus.Send(message); + break; + + case RdKafka::ERR__TIMED_OUT: + case RdKafka::ERR__TIMED_OUT_QUEUE: + delete message; + if (consumerLoop->m_timeout_sleep_delay_ms > 0) { + // If it is timed out this could just mean there were no + // new messages fetched quickly enough. This isn't really + // an error that should kill us. + #ifndef _WIN32 + usleep(consumerLoop->m_timeout_sleep_delay_ms*1000); + #else + _sleep(consumerLoop->m_timeout_sleep_delay_ms); + #endif + } + break; + case RdKafka::ERR_NO_ERROR: + bus.Send(message); + break; + default: + // Unknown error. We need to break out of this + consumerLoop->SetErrorBaton(b); + consumerLoop->m_looping = false; + break; + } } else if (ec == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART || - ec == RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED) { + ec == RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED) { bus.SendWarning(ec); } else { // Unknown error. We need to break out of this @@ -730,38 +730,38 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void* arg) { } void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, - RdKafka::ErrorCode ec) { - Nan::HandleScope scope; + RdKafka::ErrorCode ec) { + Napi::HandleScope scope(env); const unsigned int argc = 4; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); if (msg == NULL) { - argv[1] = Nan::Null(); - argv[2] = Nan::Null(); - argv[3] = Nan::New(ec); + argv[1] = env.Null(); + argv[2] = env.Null(); + argv[3] = Napi::Number::New(env, ec); } else { - argv[3] = Nan::Null(); + argv[3] = env.Null(); switch (msg->err()) { case RdKafka::ERR__PARTITION_EOF: { - argv[1] = Nan::Null(); - v8::Local eofEvent = Nan::New(); - - Nan::Set(eofEvent, Nan::New("topic").ToLocalChecked(), - Nan::New(msg->topic_name()).ToLocalChecked()); - Nan::Set(eofEvent, Nan::New("offset").ToLocalChecked(), - Nan::New(msg->offset())); - Nan::Set(eofEvent, Nan::New("partition").ToLocalChecked(), - Nan::New(msg->partition())); - - argv[2] = eofEvent; - break; + argv[1] = env.Null(); + Napi::Object eofEvent = Napi::Object::New(env); + + (eofEvent).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, msg->topic_name())); + (eofEvent).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, msg->offset())); + (eofEvent).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, msg->partition())); + + argv[2] = eofEvent; + break; } default: - argv[1] = Conversion::Message::ToV8Object(msg); - argv[2] = Nan::Null(); - break; + argv[1] = Conversion::Message::ToV8Object(msg); + argv[2] = env.Null(); + break; } // We can delete msg now @@ -771,15 +771,15 @@ void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, callback->Call(argc, argv); } -void KafkaConsumerConsumeLoop::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeLoop::OnOK() { + Napi::HandleScope scope(env); } -void KafkaConsumerConsumeLoop::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeLoop::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Error(ErrorMessage()) }; + Napi::Value argv[argc] = { Napi::Error::New(env, ErrorMessage()) }; callback->Call(argc, argv); } @@ -795,11 +795,11 @@ void KafkaConsumerConsumeLoop::HandleErrorCallback() { * @see NodeKafka::KafkaConsumer::GetMessage */ -KafkaConsumerConsumeNum::KafkaConsumerConsumeNum(Nan::Callback 
*callback,
-                                                 KafkaConsumer* consumer,
-                                                 const uint32_t & num_messages,
-                                                 const int & timeout_ms,
-                                                 bool timeout_only_for_first_message) :
+KafkaConsumerConsumeNum::KafkaConsumerConsumeNum(Napi::FunctionReference *callback,
+                                                 KafkaConsumer* consumer,
+                                                 const uint32_t & num_messages,
+                                                 const int & timeout_ms,
+                                                 bool timeout_only_for_first_message) :
   ErrorAwareWorker(callback),
   m_consumer(consumer),
   m_num_messages(num_messages),
@@ -821,98 +821,97 @@ void KafkaConsumerConsumeNum::Execute() {
       RdKafka::Message *message = b.data();
       RdKafka::ErrorCode errorCode = message->err();
       switch (errorCode) {
-      case RdKafka::ERR__PARTITION_EOF:
-        // If partition EOF and have consumed messages, retry with timeout 1
-        // This allows getting ready messages, while not waiting for new ones
-        if (m_messages.size() > eof_event_count) {
-          timeout_ms = 1;
-        }
-
-        // We will only go into this code path when `enable.partition.eof`
-        // is set to true. In this case, consumer is also interested in EOF
-        // messages, so we return an EOF message
-        m_messages.push_back(message);
-        eof_event_count += 1;
-        break;
-      case RdKafka::ERR__TIMED_OUT:
-      case RdKafka::ERR__TIMED_OUT_QUEUE:
-        // Break of the loop if we timed out
-        delete message;
-        looping = false;
-        break;
-      case RdKafka::ERR_NO_ERROR:
-        m_messages.push_back(b.data());
-
-        // This allows getting ready messages, while not waiting for new ones.
-        // This is useful when we want to get the as many messages as possible
-        // within the timeout but not wait if we already have one or more
-        // messages.
-        if (m_timeout_only_for_first_message) {
-          timeout_ms = 1;
-        }
-
-        break;
-      default:
-        // Set the error for any other errors and break
-        delete message;
-        if (m_messages.size() == eof_event_count) {
-          SetErrorBaton(Baton(errorCode));
-        }
-        looping = false;
-        break;
+        case RdKafka::ERR__PARTITION_EOF:
+          // If partition EOF and have consumed messages, retry with timeout 1
+          // This allows getting ready messages, while not waiting for new ones
+          if (m_messages.size() > eof_event_count) {
+            timeout_ms = 1;
+          }
+
+          // We will only go into this code path when `enable.partition.eof`
+          // is set to true. In this case, consumer is also interested in EOF
+          // messages, so we return an EOF message
+          m_messages.push_back(message);
+          eof_event_count += 1;
+          break;
+        case RdKafka::ERR__TIMED_OUT:
+        case RdKafka::ERR__TIMED_OUT_QUEUE:
+          // Break out of the loop if we timed out
+          delete message;
+          looping = false;
+          break;
+        case RdKafka::ERR_NO_ERROR:
+          m_messages.push_back(b.data());
+
+          // This allows getting ready messages, while not waiting for new ones.
+          // This is useful when we want to get as many messages as possible
+          // within the timeout but not wait if we already have one or more
+          // messages.
+ if (m_timeout_only_for_first_message) { + timeout_ms = 1; + } + + break; + default: + // Set the error for any other errors and break + delete message; + if (m_messages.size() == eof_event_count) { + SetErrorBaton(Baton(errorCode)); + } + looping = false; + break; } } else { if (m_messages.size() == eof_event_count) { - SetErrorBaton(b); + SetErrorBaton(b); } looping = false; } } } -void KafkaConsumerConsumeNum::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeNum::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 3; - v8::Local argv[argc]; - argv[0] = Nan::Null(); + Napi::Value argv[argc]; + argv[0] = env.Null(); - v8::Local returnArray = Nan::New(); - v8::Local eofEventsArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); + Napi::Array eofEventsArray = Napi::Array::New(env); if (m_messages.size() > 0) { int returnArrayIndex = -1; int eofEventsArrayIndex = -1; for (std::vector::iterator it = m_messages.begin(); - it != m_messages.end(); ++it) { + it != m_messages.end(); ++it) { RdKafka::Message* message = *it; switch (message->err()) { - case RdKafka::ERR_NO_ERROR: - ++returnArrayIndex; - Nan::Set(returnArray, returnArrayIndex, - Conversion::Message::ToV8Object(message)); - break; - case RdKafka::ERR__PARTITION_EOF: - ++eofEventsArrayIndex; - - // create EOF event - v8::Local eofEvent = Nan::New(); - - Nan::Set(eofEvent, Nan::New("topic").ToLocalChecked(), - Nan::New(message->topic_name()).ToLocalChecked()); - Nan::Set(eofEvent, Nan::New("offset").ToLocalChecked(), - Nan::New(message->offset())); - Nan::Set(eofEvent, Nan::New("partition").ToLocalChecked(), - Nan::New(message->partition())); - - // also store index at which position in the message array this event - // was emitted this way, we can later emit it at the right point in - // time - Nan::Set(eofEvent, - Nan::New("messageIndex").ToLocalChecked(), - Nan::New(returnArrayIndex)); - - Nan::Set(eofEventsArray, eofEventsArrayIndex, eofEvent); + case RdKafka::ERR_NO_ERROR: + ++returnArrayIndex; + (returnArray).Set(returnArrayIndex, + Conversion::Message::ToV8Object(message)); + break; + case RdKafka::ERR__PARTITION_EOF: + ++eofEventsArrayIndex; + + // create EOF event + Napi::Object eofEvent = Napi::Object::New(env); + + (eofEvent).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, message->topic_name())); + (eofEvent).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, message->offset())); + (eofEvent).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, message->partition())); + + // also store index at which position in the message array this event + // was emitted this way, we can later emit it at the right point in + // time + (eofEvent).Set(Napi::String::New(env, "messageIndex"), + Napi::Number::New(env, returnArrayIndex)); + + (eofEventsArray).Set(eofEventsArrayIndex, eofEvent); } delete message; @@ -925,19 +924,19 @@ void KafkaConsumerConsumeNum::HandleOKCallback() { callback->Call(argc, argv); } -void KafkaConsumerConsumeNum::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeNum::OnError() { + Napi::HandleScope scope(env); if (m_messages.size() > 0) { for (std::vector::iterator it = m_messages.begin(); - it != m_messages.end(); ++it) { + it != m_messages.end(); ++it) { RdKafka::Message* message = *it; delete message; } } const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -953,9 +952,9 @@ void 
KafkaConsumerConsumeNum::HandleErrorCallback() { * @see NodeKafka::KafkaConsumer::GetMessage */ -KafkaConsumerConsume::KafkaConsumerConsume(Nan::Callback *callback, - KafkaConsumer* consumer, - const int & timeout_ms) : +KafkaConsumerConsume::KafkaConsumerConsume(Napi::FunctionReference *callback, + KafkaConsumer* consumer, + const int & timeout_ms) : ErrorAwareWorker(callback), consumer(consumer), m_timeout_ms(timeout_ms) {} @@ -977,13 +976,13 @@ void KafkaConsumerConsume::Execute() { } } -void KafkaConsumerConsume::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsume::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Message::ToV8Object(m_message); delete m_message; @@ -991,11 +990,11 @@ void KafkaConsumerConsume::HandleOKCallback() { callback->Call(argc, argv); } -void KafkaConsumerConsume::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsume::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1010,10 +1009,10 @@ void KafkaConsumerConsume::HandleErrorCallback() { * @see RdKafka::KafkaConsumer::Committed */ -KafkaConsumerCommitted::KafkaConsumerCommitted(Nan::Callback *callback, - KafkaConsumer* consumer, - std::vector & t, - const int & timeout_ms) : +KafkaConsumerCommitted::KafkaConsumerCommitted(Napi::FunctionReference *callback, + KafkaConsumer* consumer, + std::vector & t, + const int & timeout_ms) : ErrorAwareWorker(callback), m_consumer(consumer), m_topic_partitions(t), @@ -1031,35 +1030,35 @@ void KafkaConsumerCommitted::Execute() { } } -void KafkaConsumerCommitted::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitted::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::TopicPartition::ToV8Array(m_topic_partitions); callback->Call(argc, argv); } -void KafkaConsumerCommitted::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitted::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } /** * @brief KafkaConsumer commit offsets with a callback function. - * + * * The first callback argument is the commit error, or null on success. 
* * @see RdKafka::KafkaConsumer::commitSync */ -KafkaConsumerCommitCb::KafkaConsumerCommitCb(Nan::Callback *callback, +KafkaConsumerCommitCb::KafkaConsumerCommitCb(Napi::FunctionReference *callback, KafkaConsumer* consumer, std::optional> & t) : ErrorAwareWorker(callback), @@ -1084,22 +1083,22 @@ void KafkaConsumerCommitCb::Execute() { } } -void KafkaConsumerCommitCb::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitCb::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void KafkaConsumerCommitCb::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitCb::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1116,10 +1115,10 @@ void KafkaConsumerCommitCb::HandleErrorCallback() { * seek to work. Use assign() to set the starting offset. */ -KafkaConsumerSeek::KafkaConsumerSeek(Nan::Callback *callback, - KafkaConsumer* consumer, - const RdKafka::TopicPartition * toppar, - const int & timeout_ms) : +KafkaConsumerSeek::KafkaConsumerSeek(Napi::FunctionReference *callback, + KafkaConsumer* consumer, + const RdKafka::TopicPartition * toppar, + const int & timeout_ms) : ErrorAwareWorker(callback), m_consumer(consumer), m_toppar(toppar), @@ -1146,22 +1145,22 @@ void KafkaConsumerSeek::Execute() { } } -void KafkaConsumerSeek::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerSeek::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void KafkaConsumerSeek::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerSeek::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1172,10 +1171,10 @@ void KafkaConsumerSeek::HandleErrorCallback() { * This callback will create a topic * */ -AdminClientCreateTopic::AdminClientCreateTopic(Nan::Callback *callback, - AdminClient* client, - rd_kafka_NewTopic_t* topic, - const int & timeout_ms) : +AdminClientCreateTopic::AdminClientCreateTopic(Napi::FunctionReference *callback, + AdminClient* client, + rd_kafka_NewTopic_t* topic, + const int & timeout_ms) : ErrorAwareWorker(callback), m_client(client), m_topic(topic), @@ -1193,22 +1192,22 @@ void AdminClientCreateTopic::Execute() { } } -void AdminClientCreateTopic::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientCreateTopic::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void AdminClientCreateTopic::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientCreateTopic::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1219,10 +1218,10 @@ void AdminClientCreateTopic::HandleErrorCallback() { * This callback will delete a topic * */ -AdminClientDeleteTopic::AdminClientDeleteTopic(Nan::Callback *callback, - AdminClient* client, - 
rd_kafka_DeleteTopic_t* topic, - const int & timeout_ms) : +AdminClientDeleteTopic::AdminClientDeleteTopic(Napi::FunctionReference *callback, + AdminClient* client, + rd_kafka_DeleteTopic_t* topic, + const int & timeout_ms) : ErrorAwareWorker(callback), m_client(client), m_topic(topic), @@ -1240,22 +1239,22 @@ void AdminClientDeleteTopic::Execute() { } } -void AdminClientDeleteTopic::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDeleteTopic::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void AdminClientDeleteTopic::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDeleteTopic::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1267,10 +1266,10 @@ void AdminClientDeleteTopic::HandleErrorCallback() { * */ AdminClientCreatePartitions::AdminClientCreatePartitions( - Nan::Callback *callback, - AdminClient* client, - rd_kafka_NewPartitions_t* partitions, - const int & timeout_ms) : + Napi::FunctionReference *callback, + AdminClient* client, + rd_kafka_NewPartitions_t* partitions, + const int & timeout_ms) : ErrorAwareWorker(callback), m_client(client), m_partitions(partitions), @@ -1288,22 +1287,22 @@ void AdminClientCreatePartitions::Execute() { } } -void AdminClientCreatePartitions::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientCreatePartitions::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void AdminClientCreatePartitions::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientCreatePartitions::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1315,7 +1314,7 @@ void AdminClientCreatePartitions::HandleErrorCallback() { * */ AdminClientListGroups::AdminClientListGroups( - Nan::Callback* callback, AdminClient* client, bool is_match_states_set, + Napi::FunctionReference* callback, AdminClient* client, bool is_match_states_set, std::vector& match_states, const int& timeout_ms) : ErrorAwareWorker(callback), @@ -1332,19 +1331,19 @@ AdminClientListGroups::~AdminClientListGroups() { void AdminClientListGroups::Execute() { Baton b = m_client->ListGroups(m_is_match_states_set, m_match_states, - m_timeout_ms, &m_event_response); + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientListGroups::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientListGroups::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); const rd_kafka_ListConsumerGroups_result_t* result = rd_kafka_event_ListConsumerGroups_result(m_event_response); @@ -1354,11 +1353,11 @@ void AdminClientListGroups::HandleOKCallback() { callback->Call(argc, argv); } -void AdminClientListGroups::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientListGroups::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + 
Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1370,7 +1369,7 @@ void AdminClientListGroups::HandleErrorCallback() { * */ AdminClientDescribeGroups::AdminClientDescribeGroups( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, std::vector& groups, bool include_authorized_operations, const int& timeout_ms) : ErrorAwareWorker(callback), @@ -1387,30 +1386,30 @@ AdminClientDescribeGroups::~AdminClientDescribeGroups() { void AdminClientDescribeGroups::Execute() { Baton b = m_client->DescribeGroups(m_groups, m_include_authorized_operations, - m_timeout_ms, &m_event_response); + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientDescribeGroups::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDescribeGroups::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDescribeConsumerGroupsResult( rd_kafka_event_DescribeConsumerGroups_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDescribeGroups::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDescribeGroups::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1422,7 +1421,7 @@ void AdminClientDescribeGroups::HandleErrorCallback() { * */ AdminClientDeleteGroups::AdminClientDeleteGroups( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_DeleteGroup_t **group_list, size_t group_cnt, const int& timeout_ms) @@ -1445,30 +1444,30 @@ AdminClientDeleteGroups::~AdminClientDeleteGroups() { void AdminClientDeleteGroups::Execute() { Baton b = m_client->DeleteGroups(m_group_list, m_group_cnt, m_timeout_ms, - &m_event_response); + &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientDeleteGroups::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDeleteGroups::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDeleteGroupsResult( rd_kafka_event_DeleteGroups_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDeleteGroups::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDeleteGroups::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1481,7 +1480,7 @@ void AdminClientDeleteGroups::HandleErrorCallback() { * */ AdminClientListConsumerGroupOffsets::AdminClientListConsumerGroupOffsets( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_ListConsumerGroupOffsets_t **req, size_t req_cnt, const bool require_stable_offsets, @@ -1506,30 +1505,30 @@ AdminClientListConsumerGroupOffsets::~AdminClientListConsumerGroupOffsets() { void AdminClientListConsumerGroupOffsets::Execute() { Baton b = m_client->ListConsumerGroupOffsets(m_req, m_req_cnt, - m_require_stable_offsets, - m_timeout_ms, 
&m_event_response); + m_require_stable_offsets, + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientListConsumerGroupOffsets::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientListConsumerGroupOffsets::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromListConsumerGroupOffsetsResult( rd_kafka_event_ListConsumerGroupOffsets_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientListConsumerGroupOffsets::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientListConsumerGroupOffsets::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1542,7 +1541,7 @@ void AdminClientListConsumerGroupOffsets::HandleErrorCallback() { * */ AdminClientDeleteRecords::AdminClientDeleteRecords( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_DeleteRecords_t **del_records, size_t del_records_cnt, const int& operation_timeout_ms, @@ -1567,42 +1566,42 @@ AdminClientDeleteRecords::~AdminClientDeleteRecords() { void AdminClientDeleteRecords::Execute() { Baton b = m_client->DeleteRecords(m_del_records, m_del_records_cnt, - m_operation_timeout_ms, m_timeout_ms, - &m_event_response); + m_operation_timeout_ms, m_timeout_ms, + &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientDeleteRecords::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDeleteRecords::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDeleteRecordsResult( rd_kafka_event_DeleteRecords_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDeleteRecords::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDeleteRecords::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } /** * @brief Describe Topics in an asynchronous worker - * + * * This callback will describe topics. 
*/ AdminClientDescribeTopics::AdminClientDescribeTopics( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_TopicCollection_t* topics, const bool include_authorized_operations, const int& timeout_ms) @@ -1624,40 +1623,40 @@ AdminClientDescribeTopics::~AdminClientDescribeTopics() { void AdminClientDescribeTopics::Execute() { Baton b = m_client->DescribeTopics(m_topics, m_include_authorized_operations, - m_timeout_ms, &m_event_response); + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientDescribeTopics::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDescribeTopics::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDescribeTopicsResult( rd_kafka_event_DescribeTopics_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDescribeTopics::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDescribeTopics::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } /** * @brief ListOffsets in an asynchronous worker - * + * * This callback will list requested offsets for the specified topic partitions. */ AdminClientListOffsets::AdminClientListOffsets( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_topic_partition_list_t* partitions, const int& timeout_ms, rd_kafka_IsolationLevel_t isolation_level) : ErrorAwareWorker(callback), @@ -1678,29 +1677,29 @@ AdminClientListOffsets::~AdminClientListOffsets() { void AdminClientListOffsets::Execute() { Baton b = m_client->ListOffsets(m_partitions, m_timeout_ms, m_isolation_level, - &m_event_response); + &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } } -void AdminClientListOffsets::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientListOffsets::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromListOffsetsResult( rd_kafka_event_ListOffsets_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientListOffsets::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientListOffsets::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } diff --git a/src/workers.h b/src/workers.h index b9583823..0194e68e 100644 --- a/src/workers.h +++ b/src/workers.h @@ -12,7 +12,8 @@ #define SRC_WORKERS_H_ #include -#include +#include +#include #include #include #include @@ -26,27 +27,25 @@ namespace NodeKafka { namespace Workers { -class ErrorAwareWorker : public Nan::AsyncWorker { +class ErrorAwareWorker : public Napi::AsyncWorker { public: - explicit ErrorAwareWorker(Nan::Callback* callback_) : - Nan::AsyncWorker(callback_), + explicit ErrorAwareWorker(Napi::FunctionReference* callback_) : + Napi::AsyncWorker(callback_->Value()), m_baton(RdKafka::ERR_NO_ERROR) {} virtual ~ErrorAwareWorker() {} virtual void Execute() = 0; - virtual void HandleOKCallback() 
= 0; - void HandleErrorCallback() { - Nan::HandleScope scope; + virtual void OnOK() = 0; + void OnError(const Napi::Error &e) { + Napi::Env env = e.Env(); + Napi::HandleScope scope(env); // Construct error and add code to it. - v8::Local error = Nan::Error(ErrorMessage()); - Nan::Set(error.As(), Nan::New("code").ToLocalChecked(), - Nan::New(GetErrorCode())); - - const unsigned int argc = 1; - v8::Local argv[argc] = { error }; + Napi::Error error = Napi::Error::New(env, e.Message()); + (error.Value().As()).Set(Napi::String::New(env, "code"), + Napi::Number::New(env, GetErrorCode())); - callback->Call(argc, argv); + Napi::AsyncWorker::OnError(error); } protected: @@ -59,15 +58,15 @@ class ErrorAwareWorker : public Nan::AsyncWorker { } void SetErrorBaton(const NodeKafka::Baton & baton) { m_baton = baton; - SetErrorMessage(m_baton.errstr().c_str()); + SetError(m_baton.errstr().c_str()); } int GetErrorCode() { return m_baton.err(); } - v8::Local GetErrorObject() { - return m_baton.ToObject(); + Napi::Error GetErrorObject(const Napi::Env &env) { + return m_baton.ToError(env); } Baton m_baton; @@ -75,7 +74,7 @@ class ErrorAwareWorker : public Nan::AsyncWorker { class MessageWorker : public ErrorAwareWorker { public: - explicit MessageWorker(Nan::Callback* callback_) + explicit MessageWorker(Napi::FunctionReference* callback_) : ErrorAwareWorker(callback_), m_asyncdata() { m_async = new uv_async_t; uv_async_init( @@ -92,9 +91,10 @@ class MessageWorker : public ErrorAwareWorker { } void WorkMessage() { - if (!callback) { - return; - } + // TODO: is callback ever NULL? + // if (!callback) { + // return; + // } std::vector message_queue; std::vector warning_queue; @@ -158,12 +158,12 @@ class MessageWorker : public ErrorAwareWorker { uv_async_send(m_async); } - NAN_INLINE static NAUV_WORK_CB(m_async_message) { + inline static void m_async_message(uv_async_t *async) { MessageWorker *worker = static_cast(async->data); worker->WorkMessage(); } - NAN_INLINE static void AsyncClose_(uv_handle_t* handle) { + inline static void AsyncClose_(uv_handle_t* handle) { MessageWorker *worker = static_cast(handle->data); delete reinterpret_cast(handle); delete worker; @@ -176,36 +176,37 @@ class MessageWorker : public ErrorAwareWorker { }; namespace Handle { -class OffsetsForTimes : public ErrorAwareWorker { - public: - OffsetsForTimes(Nan::Callback*, NodeKafka::Connection*, - std::vector &, - const int &); - ~OffsetsForTimes(); - - void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); - - private: - NodeKafka::Connection * m_handle; - std::vector m_topic_partitions; - const int m_timeout_ms; -}; + using NodeKafka::Connection; + template class OffsetsForTimes : public ErrorAwareWorker { + public: + OffsetsForTimes(Napi::FunctionReference*, Connection*, + std::vector &, + const int &); + ~OffsetsForTimes(); + + void Execute(); + void OnOK(); + void OnError(); + + private: + Connection * m_handle; + std::vector m_topic_partitions; + const int m_timeout_ms; + }; } // namespace Handle -class ConnectionMetadata : public ErrorAwareWorker { - public: - ConnectionMetadata(Nan::Callback*, NodeKafka::Connection*, - std::string, int, bool); +template class ConnectionMetadata : public ErrorAwareWorker { +public: + ConnectionMetadata(Napi::FunctionReference*, Connection*, + std::string, int, bool); ~ConnectionMetadata(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); - private: - NodeKafka::Connection * m_connection; +private: + Connection * 
m_connection; std::string m_topic; int m_timeout_ms; bool m_all_topics; @@ -213,18 +214,18 @@ class ConnectionMetadata : public ErrorAwareWorker { RdKafka::Metadata* m_metadata; }; -class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { +template class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { public: - ConnectionQueryWatermarkOffsets(Nan::Callback*, NodeKafka::Connection*, + ConnectionQueryWatermarkOffsets(Napi::FunctionReference*, Connection*, std::string, int32_t, int); ~ConnectionQueryWatermarkOffsets(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: - NodeKafka::Connection * m_connection; + Connection * m_connection; std::string m_topic; int32_t m_partition; int m_timeout_ms; @@ -235,12 +236,12 @@ class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { class ProducerConnect : public ErrorAwareWorker { public: - ProducerConnect(Nan::Callback*, NodeKafka::Producer*); + ProducerConnect(Napi::FunctionReference*, NodeKafka::Producer*); ~ProducerConnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -248,12 +249,12 @@ class ProducerConnect : public ErrorAwareWorker { class ProducerDisconnect : public ErrorAwareWorker { public: - ProducerDisconnect(Nan::Callback*, NodeKafka::Producer*); + ProducerDisconnect(Napi::FunctionReference*, NodeKafka::Producer*); ~ProducerDisconnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -261,11 +262,11 @@ class ProducerDisconnect : public ErrorAwareWorker { class ProducerFlush : public ErrorAwareWorker { public: - ProducerFlush(Nan::Callback*, NodeKafka::Producer*, int); + ProducerFlush(Napi::FunctionReference*, NodeKafka::Producer*, int); ~ProducerFlush(); void Execute(); - void HandleOKCallback(); + void OnOK(); private: NodeKafka::Producer * producer; @@ -274,12 +275,12 @@ class ProducerFlush : public ErrorAwareWorker { class ProducerInitTransactions : public ErrorAwareWorker { public: - ProducerInitTransactions(Nan::Callback*, NodeKafka::Producer*, const int &); + ProducerInitTransactions(Napi::FunctionReference*, NodeKafka::Producer*, const int &); ~ProducerInitTransactions(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -288,12 +289,12 @@ class ProducerInitTransactions : public ErrorAwareWorker { class ProducerBeginTransaction : public ErrorAwareWorker { public: - ProducerBeginTransaction(Nan::Callback*, NodeKafka::Producer*); + ProducerBeginTransaction(Napi::FunctionReference*, NodeKafka::Producer*); ~ProducerBeginTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -301,12 +302,12 @@ class ProducerBeginTransaction : public ErrorAwareWorker { class ProducerCommitTransaction : public ErrorAwareWorker { public: - ProducerCommitTransaction(Nan::Callback*, NodeKafka::Producer*, const int &); + ProducerCommitTransaction(Napi::FunctionReference*, NodeKafka::Producer*, const int &); ~ProducerCommitTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -315,12 +316,12 @@ class ProducerCommitTransaction : public 
ErrorAwareWorker { class ProducerAbortTransaction : public ErrorAwareWorker { public: - ProducerAbortTransaction(Nan::Callback*, NodeKafka::Producer*, const int &); + ProducerAbortTransaction(Napi::FunctionReference*, NodeKafka::Producer*, const int &); ~ProducerAbortTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -330,15 +331,15 @@ class ProducerAbortTransaction : public ErrorAwareWorker { class ProducerSendOffsetsToTransaction : public ErrorAwareWorker { public: ProducerSendOffsetsToTransaction( - Nan::Callback*, NodeKafka::Producer*, + Napi::FunctionReference*, NodeKafka::Producer*, std::vector &, KafkaConsumer*, const int &); ~ProducerSendOffsetsToTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -349,12 +350,12 @@ class ProducerSendOffsetsToTransaction : public ErrorAwareWorker { class KafkaConsumerConnect : public ErrorAwareWorker { public: - KafkaConsumerConnect(Nan::Callback*, NodeKafka::KafkaConsumer*); + KafkaConsumerConnect(Napi::FunctionReference*, NodeKafka::KafkaConsumer*); ~KafkaConsumerConnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * consumer; @@ -362,12 +363,12 @@ class KafkaConsumerConnect : public ErrorAwareWorker { class KafkaConsumerDisconnect : public ErrorAwareWorker { public: - KafkaConsumerDisconnect(Nan::Callback*, NodeKafka::KafkaConsumer*); + KafkaConsumerDisconnect(Napi::FunctionReference*, NodeKafka::KafkaConsumer*); ~KafkaConsumerDisconnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * consumer; @@ -375,15 +376,15 @@ class KafkaConsumerDisconnect : public ErrorAwareWorker { class KafkaConsumerConsumeLoop : public MessageWorker { public: - KafkaConsumerConsumeLoop(Nan::Callback*, + KafkaConsumerConsumeLoop(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const int &, const int &); ~KafkaConsumerConsumeLoop(); static void ConsumeLoop(void *arg); void Close(); void Execute(const ExecutionMessageBus&); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); void HandleMessageCallback(RdKafka::Message*, RdKafka::ErrorCode); private: uv_thread_t thread_event_loop; @@ -396,12 +397,12 @@ class KafkaConsumerConsumeLoop : public MessageWorker { class KafkaConsumerConsume : public ErrorAwareWorker { public: - KafkaConsumerConsume(Nan::Callback*, NodeKafka::KafkaConsumer*, const int &); + KafkaConsumerConsume(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const int &); ~KafkaConsumerConsume(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * consumer; const int m_timeout_ms; @@ -410,14 +411,14 @@ class KafkaConsumerConsume : public ErrorAwareWorker { class KafkaConsumerCommitted : public ErrorAwareWorker { public: - KafkaConsumerCommitted(Nan::Callback*, + KafkaConsumerCommitted(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, std::vector &, const int &); ~KafkaConsumerCommitted(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; std::vector m_topic_partitions; @@ -426,14 +427,14 @@ class 
KafkaConsumerCommitted : public ErrorAwareWorker { class KafkaConsumerCommitCb : public ErrorAwareWorker { public: - KafkaConsumerCommitCb(Nan::Callback*, + KafkaConsumerCommitCb(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, std::optional> &); ~KafkaConsumerCommitCb(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; std::optional> m_topic_partitions; @@ -441,13 +442,13 @@ class KafkaConsumerCommitCb : public ErrorAwareWorker { class KafkaConsumerSeek : public ErrorAwareWorker { public: - KafkaConsumerSeek(Nan::Callback*, NodeKafka::KafkaConsumer*, + KafkaConsumerSeek(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const RdKafka::TopicPartition *, const int &); ~KafkaConsumerSeek(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; const RdKafka::TopicPartition * m_toppar; @@ -456,13 +457,13 @@ class KafkaConsumerSeek : public ErrorAwareWorker { class KafkaConsumerConsumeNum : public ErrorAwareWorker { public: - KafkaConsumerConsumeNum(Nan::Callback*, NodeKafka::KafkaConsumer*, + KafkaConsumerConsumeNum(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const uint32_t &, const int &, bool); ~KafkaConsumerConsumeNum(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; const uint32_t m_num_messages; @@ -476,13 +477,13 @@ class KafkaConsumerConsumeNum : public ErrorAwareWorker { */ class AdminClientCreateTopic : public ErrorAwareWorker { public: - AdminClientCreateTopic(Nan::Callback*, NodeKafka::AdminClient*, + AdminClientCreateTopic(Napi::FunctionReference*, NodeKafka::AdminClient*, rd_kafka_NewTopic_t*, const int &); ~AdminClientCreateTopic(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient * m_client; rd_kafka_NewTopic_t* m_topic; @@ -494,13 +495,13 @@ class AdminClientCreateTopic : public ErrorAwareWorker { */ class AdminClientDeleteTopic : public ErrorAwareWorker { public: - AdminClientDeleteTopic(Nan::Callback*, NodeKafka::AdminClient*, + AdminClientDeleteTopic(Napi::FunctionReference*, NodeKafka::AdminClient*, rd_kafka_DeleteTopic_t*, const int &); ~AdminClientDeleteTopic(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient * m_client; rd_kafka_DeleteTopic_t* m_topic; @@ -512,13 +513,13 @@ class AdminClientDeleteTopic : public ErrorAwareWorker { */ class AdminClientCreatePartitions : public ErrorAwareWorker { public: - AdminClientCreatePartitions(Nan::Callback*, NodeKafka::AdminClient*, + AdminClientCreatePartitions(Napi::FunctionReference*, NodeKafka::AdminClient*, rd_kafka_NewPartitions_t*, const int &); ~AdminClientCreatePartitions(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient * m_client; rd_kafka_NewPartitions_t* m_partitions; @@ -530,14 +531,14 @@ class AdminClientCreatePartitions : public ErrorAwareWorker { */ class AdminClientListGroups : public ErrorAwareWorker { public: - AdminClientListGroups(Nan::Callback *, NodeKafka::AdminClient *, bool, - std::vector &, - const int &); + AdminClientListGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, bool, + std::vector &, + const int &); 
   ~AdminClientListGroups();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
@@ -552,13 +553,13 @@ class AdminClientListGroups : public ErrorAwareWorker {
  */
 class AdminClientDescribeGroups : public ErrorAwareWorker {
  public:
-  AdminClientDescribeGroups(Nan::Callback *, NodeKafka::AdminClient *,
-                            std::vector<std::string> &, bool, const int &);
+  AdminClientDescribeGroups(Napi::FunctionReference *, NodeKafka::AdminClient *,
+                            std::vector<std::string> &, bool, const int &);
   ~AdminClientDescribeGroups();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
@@ -573,13 +574,13 @@ class AdminClientDescribeGroups : public ErrorAwareWorker {
  */
 class AdminClientDeleteGroups : public ErrorAwareWorker {
  public:
-  AdminClientDeleteGroups(Nan::Callback *, NodeKafka::AdminClient *,
-                          rd_kafka_DeleteGroup_t **, size_t, const int &);
+  AdminClientDeleteGroups(Napi::FunctionReference *, NodeKafka::AdminClient *,
+                          rd_kafka_DeleteGroup_t **, size_t, const int &);
   ~AdminClientDeleteGroups();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
@@ -594,14 +595,14 @@ class AdminClientDeleteGroups : public ErrorAwareWorker {
  */
 class AdminClientListConsumerGroupOffsets : public ErrorAwareWorker {
  public:
-  AdminClientListConsumerGroupOffsets(Nan::Callback *, NodeKafka::AdminClient *,
-                                      rd_kafka_ListConsumerGroupOffsets_t **, size_t, bool,
-                                      const int &);
+  AdminClientListConsumerGroupOffsets(Napi::FunctionReference *, NodeKafka::AdminClient *,
+                                      rd_kafka_ListConsumerGroupOffsets_t **, size_t, bool,
+                                      const int &);
   ~AdminClientListConsumerGroupOffsets();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
@@ -617,14 +618,14 @@ class AdminClientListConsumerGroupOffsets : public ErrorAwareWorker {
  */
 class AdminClientDeleteRecords : public ErrorAwareWorker {
  public:
-  AdminClientDeleteRecords(Nan::Callback *, NodeKafka::AdminClient *,
-                           rd_kafka_DeleteRecords_t **, size_t, const int &,
-                           const int &);
+  AdminClientDeleteRecords(Napi::FunctionReference *, NodeKafka::AdminClient *,
+                           rd_kafka_DeleteRecords_t **, size_t, const int &,
+                           const int &);
   ~AdminClientDeleteRecords();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
@@ -640,14 +641,14 @@ class AdminClientDeleteRecords : public ErrorAwareWorker {
  */
 class AdminClientDescribeTopics : public ErrorAwareWorker {
  public:
-  AdminClientDescribeTopics(Nan::Callback *, NodeKafka::AdminClient *,
-                            rd_kafka_TopicCollection_t *, const bool,
-                            const int &);
+  AdminClientDescribeTopics(Napi::FunctionReference *, NodeKafka::AdminClient *,
+                            rd_kafka_TopicCollection_t *, const bool,
+                            const int &);
   ~AdminClientDescribeTopics();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
@@ -662,14 +663,14 @@ class AdminClientDescribeTopics : public ErrorAwareWorker {
  */
 class AdminClientListOffsets : public ErrorAwareWorker {
  public:
-  AdminClientListOffsets(Nan::Callback *, NodeKafka::AdminClient *,
-                         rd_kafka_topic_partition_list_t *, const int &,
-                         rd_kafka_IsolationLevel_t);
+  AdminClientListOffsets(Napi::FunctionReference *, NodeKafka::AdminClient *,
+                         rd_kafka_topic_partition_list_t *, const int &,
+                         rd_kafka_IsolationLevel_t);
   ~AdminClientListOffsets();
 
   void Execute();
-  void HandleOKCallback();
-  void HandleErrorCallback();
+  void OnOK();
+  void OnError();
 
  private:
   NodeKafka::AdminClient *m_client;
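// --- Illustrative sketch (not part of this patch) -----------------------
// The declarations above all follow the same Nan -> node-addon-api worker
// migration: Nan::Callback* becomes Napi::FunctionReference*, and the
// HandleOKCallback()/HandleErrorCallback() hooks become OnOK()/OnError().
// Below is a minimal standalone worker in the post-migration style,
// assuming a plain Napi::AsyncWorker base. ErrorAwareWorker in this
// codebase presumably layers RdKafka error propagation on top of
// Napi::AsyncWorker, and its parameterless OnError() is this project's
// own adaptation of Napi::AsyncWorker::OnError(const Napi::Error&).
// ExampleSeekWorker and its parameter are hypothetical names used only
// for illustration.
#include <napi.h>

class ExampleSeekWorker : public Napi::AsyncWorker {
 public:
  ExampleSeekWorker(Napi::Function& callback, int timeout_ms)
      : Napi::AsyncWorker(callback), m_timeout_ms(timeout_ms) {}

  // Runs on a libuv worker thread; must not touch JS values here.
  void Execute() override {
    if (m_timeout_ms < 0)
      SetError("timeout must be non-negative");  // routes to OnError()
  }

  // Back on the JS thread; the N-API analogue of Nan's HandleOKCallback().
  void OnOK() override {
    Callback().Call({Env().Null()});  // node-style (err) callback, no error
  }

  // Back on the JS thread; the N-API analogue of Nan's HandleErrorCallback().
  void OnError(const Napi::Error& e) override {
    Callback().Call({e.Value()});
  }

 private:
  int m_timeout_ms;
};
// Usage: (new ExampleSeekWorker(cb, 1000))->Queue();
// ------------------------------------------------------------------------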