Commit 7084e6b: Fix typos

kianmeng committed Dec 24, 2021
1 parent de27a37 commit 7084e6b
Showing 10 changed files with 23 additions and 23 deletions.
6 changes: 3 additions & 3 deletions changelog.md
@@ -192,7 +192,7 @@
 - Moved make_ref to function impl from record definition
 
 * 2.2.0
-- Add truely async send API `kpro:send/2`
+- Add truly async send API `kpro:send/2`
 
 * 2.1.2
 - Bump crc32cer to 0.1.3 to support alpine/busybox build
@@ -203,7 +203,7 @@
 
 * 2.1.0
 - Simplify batch input. Batch magic version is derived from produce API version.
-  no longer depends on batch input format to determin magic version.
+  no longer depends on batch input format to determine magic version.
 
 * 2.0.1
 - Bump `crc32cer` to from `0.1.0` to `0.1.1` to fix build issue in OSX
@@ -245,6 +245,6 @@
 * 0.9 -> 1.0 incompatible changes
 - changed from erlang.mk to rebar/rebar3
 - encode inputs from records to proplists
-- decode ouptuts from records to proplists
+- decode outputs from records to proplists
 - `kpro:fetch_request`, `kpro:offsets_request` APIs have args list changed
 - Maximum correlation ID changed to (1 bsl 24 - 1)
12 changes: 6 additions & 6 deletions priv/kafka.bnf
@@ -4265,9 +4265,9 @@ TxnOffsetCommitRequestV0 => transactional_id group_id producer_id producer_epoch
 # group_id: The ID of the group.
 # producer_id: The current producer ID in use by the transactional ID.
 # producer_epoch: The current epoch associated with the producer ID.
-# topics: Each topic that we want to committ offsets for.
+# topics: Each topic that we want to commit offsets for.
 # name: The topic name.
-# partitions: The partitions inside the topic that we want to committ offsets for.
+# partitions: The partitions inside the topic that we want to commit offsets for.
 # partition_index: The index of the partition within the topic.
 # committed_offset: The message offset to be committed.
 # committed_metadata: Any associated metadata the client wants to keep.
@@ -4289,9 +4289,9 @@ TxnOffsetCommitRequestV1 => transactional_id group_id producer_id producer_epoch
 # group_id: The ID of the group.
 # producer_id: The current producer ID in use by the transactional ID.
 # producer_epoch: The current epoch associated with the producer ID.
-# topics: Each topic that we want to committ offsets for.
+# topics: Each topic that we want to commit offsets for.
 # name: The topic name.
-# partitions: The partitions inside the topic that we want to committ offsets for.
+# partitions: The partitions inside the topic that we want to commit offsets for.
 # partition_index: The index of the partition within the topic.
 # committed_offset: The message offset to be committed.
 # committed_metadata: Any associated metadata the client wants to keep.
@@ -4314,9 +4314,9 @@ TxnOffsetCommitRequestV2 => transactional_id group_id producer_id producer_epoch
 # group_id: The ID of the group.
 # producer_id: The current producer ID in use by the transactional ID.
 # producer_epoch: The current epoch associated with the producer ID.
-# topics: Each topic that we want to committ offsets for.
+# topics: Each topic that we want to commit offsets for.
 # name: The topic name.
-# partitions: The partitions inside the topic that we want to committ offsets for.
+# partitions: The partitions inside the topic that we want to commit offsets for.
 # partition_index: The index of the partition within the topic.
 # committed_offset: The message offset to be committed.
 # committed_leader_epoch: The leader epoch of the last consumed record.
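
Note: in kpro, a field list like the TxnOffsetCommitRequest schemas above is supplied as nested Erlang terms to `kpro:make_request/3`. A minimal sketch with invented IDs and offsets (map-style fields are assumed to be accepted for this API and version):

    %% All values below are hypothetical.
    Fields = #{ transactional_id => <<"my-txn">>
              , group_id => <<"my-group">>
              , producer_id => 4000
              , producer_epoch => 0
              , topics =>
                  [ #{ name => <<"test-topic">>
                     , partitions =>
                         [ #{ partition_index => 0
                            , committed_offset => 42
                            , committed_metadata => <<>>
                            } ]
                     } ]
              },
    Req = kpro:make_request(txn_offset_commit, 0, Fields).
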
4 changes: 2 additions & 2 deletions src/kpro.erl
@@ -237,7 +237,7 @@
 -type batch_meta_key() ::
         is_transaction % if this message was produced in a transaction
       | is_control % for read_uncommitted clients
-      | last_offset % client wont have to do lists:last(Messages)
+      | last_offset % client won't have to do lists:last(Messages)
       | max_ts % client don't have to scan the messages for max
       | producer_id. % it can be referenced by a future fetch response in
                      % its aborted_transactions field
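
Note: the `last_offset` key above exists so a consumer can advance its position without calling lists:last/1 on the decoded message list. A tiny illustrative helper, assuming batch meta is a map keyed by `batch_meta_key()`:

    %% Hypothetical helper: next offset to fetch after consuming a batch.
    next_fetch_offset(BatchMeta) when is_map(BatchMeta) ->
      maps:get(last_offset, BatchMeta) + 1.
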
@@ -411,7 +411,7 @@ connect_controller(Bootstrap, ConnConfig, Opts) ->
 
 %% @doc Connect to group or transaction coordinator.
 %% If the first arg is not a connection pid but a list of bootstrapping
-%% endpoints, it will frist try to connect to any of the nodes
+%% endpoints, it will first try to connect to any of the nodes
 %% NOTE: 'txn' type only applicable to kafka 0.11 or later
 -spec connect_coordinator(connection() | [endpoint()], conn_config(),
                           #{ type => coordinator_type()
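
Note: a usage sketch for the `connect_coordinator/3` documented above, bootstrapping from an endpoint list (host, port, and group id are made up):

    %% Discover, then connect to, the group coordinator via bootstrap endpoints.
    Endpoints = [{"localhost", 9092}],
    ConnConfig = #{connect_timeout => 5000},
    case kpro:connect_coordinator(Endpoints, ConnConfig,
                                  #{type => group, id => <<"my-group">>}) of
      {ok, Conn} -> kpro:close_connection(Conn);
      {error, Reason} -> {error, Reason}
    end.
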
2 changes: 1 addition & 1 deletion src/kpro_batch_v01.erl
@@ -147,7 +147,7 @@ decode_loop(<<O:64/?INT, L:32/?INT, Body:L/binary, Rest/binary>>, OuterMsg, Acc)
 
 %% Assign relative offsets to help kafka save some CPU when compressed.
 %% Kafka will decompress to validate CRC, and assign real or relative offsets
-%% depending on kafka verson and/or broker config. For 0.10 or later if relative
+%% depending on kafka version and/or broker config. For 0.10 or later if relative
 %% offsets are correctly assigned by producer, kafka will take the original
 %% compressed batch as-is instead of reassign offsets then re-compress.
 %% ref: https://cwiki.apache.org/confluence/display/KAFKA/ \
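
Note: to make the relative-offset comment concrete, inner messages of a compressed wrapper get offsets 0..N-1 so that, on 0.10+, the broker can keep the compressed blob as-is. An illustrative assignment, not this module's actual code, assuming each message is represented as a map:

    %% Give each message in a to-be-compressed batch a 0-based relative offset.
    assign_relative_offsets(Msgs) ->
      lists:zipwith(fun(Msg, Rel) -> Msg#{offset => Rel} end,
                    Msgs, lists:seq(0, length(Msgs) - 1)).
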
4 changes: 2 additions & 2 deletions src/kpro_brokers.erl
@@ -84,7 +84,7 @@ connect_partition_leader(Bootstrap, Config, Topic, Partition, Opts) ->
 
 %% @doc Connect group or transaction coordinator.
 %% If the first arg is not a connection pid but a list of bootstrapping
-%% endpoints, it will frist try to connect to any of the nodes
+%% endpoints, it will first try to connect to any of the nodes
 %% NOTE: 'txn' type only applicable to kafka 0.11 or later
 -spec connect_coordinator(connection() | [endpoint()], config(),
                           #{ type => kpro:coordinator_type()
@@ -98,7 +98,7 @@ connect_coordinator(Bootstrap, Config, #{ type := Type
   DiscoverFun = fun(Conn) -> discover_coordinator(Conn, Type, Id, Timeout) end,
   discover_and_connect(DiscoverFun, Bootstrap, Config, Timeout).
 
-%% @doc Conect to the controller broker of the kafka cluster.
+%% @doc Connect to the controller broker of the kafka cluster.
 -spec connect_controller(connection() | [endpoint()], config(),
                          #{timeout => timeout()}) ->
         {ok, connection()} | {error, any()}.
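
Note: the corrected `connect_controller` doc in use, with hypothetical endpoints and timeouts:

    %% Connect to the cluster controller (e.g. for topic-management requests).
    {ok, Conn} = kpro:connect_controller([{"localhost", 9092}],
                                         #{connect_timeout => 5000},
                                         #{timeout => 5000}),
    kpro:close_connection(Conn).
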
4 changes: 2 additions & 2 deletions src/kpro_consumer_group.erl
@@ -52,13 +52,13 @@
 %% Value Fields (when key version is 0 | 1):
 %%   version :: 0 | 1
 %%   offset :: integer()
-%%   metdata :: binary()
+%%   metadata :: binary()
 %%   timestamp :: integer() when version = 0
 %%   commit_time :: integer() when version = 1
 %%   expire_time :: integer() when version = 1
 %%
 %% Value Fields (when key version is 2):
-%%   version :: integer() %% up to the consuemr implementation
+%%   version :: integer() %% up to the consumer implementation
 %%   protocol_type :: binary() %% should be `<<"consumer">>' but not must
 %%   generation_id :: integer()
 %%   protocol :: binary() %% `<<"roundrobin">>' etc.
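
Note: to put the corrected field list in context, a decoded version-1 offset-commit value might be rendered as the following map (shape and numbers are illustrative only; the module's actual return format may differ):

    #{ version => 1           %% value schema version
     , offset => 42           %% committed offset
     , metadata => <<>>       %% free-form client metadata
     , commit_time => 1640304000000
     , expire_time => 1640390400000
     }.
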
2 changes: 1 addition & 1 deletion src/kpro_lib.erl
@@ -91,7 +91,7 @@ send_and_recv(#kpro_req{api = API, vsn = Vsn} = Req,
 
 %% @doc Function pipeline.
 %% The first function takes no args, all succeeding ones should be arity-0 or 1
-%% functions. All functions should retrun
+%% functions. All functions should return
 %% `ok' | `{ok, Result}' | `{error, Reason}'.
 %% where `Result' is the input arg of the next function,
 %% or the result of pipeline if it's the last pipe node.
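
Note: the pipeline contract above, sketched from the caller's side. The runner name `kpro_lib:ok_pipe/1` is inferred from this doc comment, so treat it as an assumption:

    %% Each fun's {ok, Result} feeds the next fun; any {error, _} aborts.
    Pipeline = [ fun() -> {ok, 1} end        % seed
               , fun(N) -> {ok, N + 1} end   % receives 1, returns {ok, 2}
               , fun(N) -> {ok, N * 10} end  % receives 2, returns {ok, 20}
               ],
    {ok, 20} = kpro_lib:ok_pipe(Pipeline).
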
8 changes: 4 additions & 4 deletions src/kpro_req_lib.erl
@@ -115,13 +115,13 @@ metadata_topics_list(_, all) -> ?kpro_null;
 metadata_topics_list(_, Names) ->
   [#{name => Name, tagged_fields => #{}} || Name <- Names].
 
-%% @doc Help function to contruct a `list_offset' request
+%% @doc Help function to construct a `list_offset' request
 %% against one single topic-partition.
 -spec list_offsets(vsn(), topic(), partition(), msg_ts()) -> req().
 list_offsets(Vsn, Topic, Partition, Time) ->
   list_offsets(Vsn, Topic, Partition, Time, ?kpro_read_committed).
 
-%% @doc Help function to contruct a `list_offset' request against one single
+%% @doc Help function to construct a `list_offset' request against one single
 %% topic-partition. In transactional mode,
 %% set `IsolationLevel = ?kpro_read_uncommitted' to list uncommitted offsets.
 -spec list_offsets(vsn(), topic(), partition(),
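
Note: a quick sketch of the helper documented above feeding `kpro:request_sync/3` (`Conn` is assumed to be an established partition-leader connection; version and topic are arbitrary):

    %% -1 asks for the latest offset of the partition.
    Req = kpro_req_lib:list_offsets(2, <<"test-topic">>, 0, -1),
    {ok, Rsp} = kpro:request_sync(Conn, Req, 5000).
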
@@ -205,7 +205,7 @@ fetch(Vsn, Topic, Partition, Offset, Opts) ->
          {log_start_offset, -1}, %% irelevant to clients
          {current_leader_epoch, LeaderEpoch}
        ]]}]]},
-     % we alwyas fetch from one single topic-partition
+     % we always fetch from one single topic-partition
      % never need to forget any
      {forgotten_topics_data, []},
      {rack_id, RackID}
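
Note: the structure above is what `fetch/5` assembles internally; a hypothetical invocation (the option keys are assumptions, not confirmed by this diff):

    %% Fetch from offset 42 of one topic-partition.
    Req = kpro_req_lib:fetch(11, <<"test-topic">>, 0, 42,
                             #{ max_wait_time => 1000
                              , min_bytes => 1
                              , max_bytes => 1 bsl 20
                              }).
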
@@ -510,7 +510,7 @@ translate([key_type | _], Value) ->
   end;
 translate(_Stack, Value) -> Value.
 
-%% Encode prmitives.
+%% Encode primitives.
 encode(Type, Value) -> kpro_lib:encode(Type, Value).
 
 bin(X) -> iolist_to_binary(X).
2 changes: 1 addition & 1 deletion src/kpro_rsp_lib.erl
@@ -66,7 +66,7 @@ dec_struct([{Name, FieldSc} | Schema], Fields, Stack, Bin) ->
 
 %%%_* Internal functions =======================================================
 
-%% Decode prmitives.
+%% Decode primitives.
 dec(Type, Bin) -> kpro_lib:decode(Type, Bin).
 
 decode_struct(API, Vsn, Bin) ->
2 changes: 1 addition & 1 deletion test/kpro_group_tests.erl
@@ -59,7 +59,7 @@ test_full_flow(KafkaVsn) ->
   % send hartbeats, there should be a generation_id in heartbeat requests,
   % generation bumps whenever there is a group re-balance, however since
   % we are testing with only one group member, we do not expect any group
-  % rebalancing, hence generation_id should not change (alwyas send the same)
+  % rebalancing, hence generation_id should not change (always send the same)
   F = fun() ->
         heartbeat(Connection, GroupId, MemberId, Generation, KafkaVsn)
       end,
