diff --git a/README.md b/README.md
index f2d3f0a9a..1a88055c3 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ For more information, you can refer to [PFRL's documentation](http://pfrl.readth
## Algorithms
-| Algorithm | Discrete Action | Continous Action | Recurrent Model | Batch Training | CPU Async Training | Pretrained models* |
+| Algorithm | Discrete Action | Continuous Action | Recurrent Model | Batch Training | CPU Async Training | Pretrained models* |
|:----------|:---------------:|:----------------:|:---------------:|:--------------:|:------------------:|:------------------:|
| DQN (including DoubleDQN etc.) | ✓ | ✓ (NAF) | ✓ | ✓ | x | ✓ |
| Categorical DQN | ✓ | x | ✓ | ✓ | x | x |
diff --git a/pfrl/agents/trpo.py b/pfrl/agents/trpo.py
index 613b6258e..312185b95 100644
--- a/pfrl/agents/trpo.py
+++ b/pfrl/agents/trpo.py
@@ -687,7 +687,7 @@ def evaluate_current_policy():
step_size *= 0.5
else:
self.logger.info(
- "Line search coundn't find a good step size. The policy was not"
+ "Line search couldn't find a good step size. The policy was not"
" updated."
)
self.policy_step_size_record.append(0.0)
diff --git a/pfrl/experiments/prepare_output_dir.py b/pfrl/experiments/prepare_output_dir.py
index f82a95289..0c2387abc 100644
--- a/pfrl/experiments/prepare_output_dir.py
+++ b/pfrl/experiments/prepare_output_dir.py
@@ -23,7 +23,7 @@ def generate_exp_id(prefix=None, argv=sys.argv) -> str:
checksum, git diff from HEAD and command line arguments.
Returns:
- A generated experiment id in string (str) which if avialable
+        A generated experiment id in string (str) which is available
for directory name
"""
@@ -76,7 +76,7 @@ def prepare_output_dir(
"""Prepare a directory for outputting training results.
An output directory, which ends with the current datetime string,
- is created. Then the following infomation is saved into the directory:
+ is created. Then the following information is saved into the directory:
args.txt: argument values and arbitrary parameters
command.txt: command itself