2021-08-11 22:09:32,774 ERROR trial_runner.py:482 -- Error processing event.
Traceback (most recent call last):
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 426, in _process_trial
    result = self.trial_executor.fetch_result(trial)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 378, in fetch_result
    result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/worker.py", line 1457, in get
    raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::PPO.train() (pid=29651, ip=127.0.1.1)
  File "python/ray/_raylet.pyx", line 636, in ray._raylet.execute_task
  File "python/ray/_raylet.pyx", line 619, in ray._raylet.execute_task.function_executor
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 444, in train
    raise e
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 433, in train
    result = Trainable.train(self)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/tune/trainable.py", line 176, in train
    result = self._train()
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 129, in _train
    fetches = self.optimizer.step()
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/optimizers/multi_gpu_optimizer.py", line 140, in step
    self.num_envs_per_worker, self.train_batch_size)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/optimizers/rollout.py", line 29, in collect_samples
    next_sample = ray_get_and_free(fut_sample)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/utils/memory.py", line 33, in ray_get_and_free
    result = ray.get(object_ids)
ray.exceptions.RayTaskError(ValueError): ray::RolloutWorker.sample() (pid=30009, ip=127.0.1.1)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/utils/tf_run_builder.py", line 94, in run_timeline
    fetches = sess.run(ops, feed_dict=feed_dict)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/tensorflow_core/python/client/session.py", line 956, in run
    run_metadata_ptr)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/tensorflow_core/python/client/session.py", line 1156, in _run
    (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1, 132) for Tensor 'default_policy/observation:0', which has shape '(?, 129)'

During handling of the above exception, another exception occurred:

ray::RolloutWorker.sample() (pid=30009, ip=127.0.1.1)
  File "python/ray/_raylet.pyx", line 633, in ray._raylet.execute_task
  File "python/ray/_raylet.pyx", line 634, in ray._raylet.execute_task
  File "python/ray/_raylet.pyx", line 636, in ray._raylet.execute_task
  File "python/ray/_raylet.pyx", line 619, in ray._raylet.execute_task.function_executor
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 471, in sample
    batches = [self.input_reader.next()]
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/evaluation/sampler.py", line 56, in next
    batches = [self.get_data()]
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/evaluation/sampler.py", line 99, in get_data
    item = next(self.rollout_provider)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/evaluation/sampler.py", line 327, in _env_runner
    active_episodes)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/evaluation/sampler.py", line 551, in _do_policy_eval
    eval_results[k] = builder.get(v)
  File "/home/neousys/anaconda3/envs/flow/lib/python3.7/site-packages/ray/rllib/utils/tf_run_builder.py", line 53, in get
    self.fetches, self.feed_dict))
ValueError: Error fetching: [<tf.Tensor 'default_policy/add:0' shape=(?, 2) dtype=float32>, {'action_prob': <tf.Tensor 'default_policy/Exp_1:0' shape=(?,) dtype=float32>, 'action_logp': <tf.Tensor 'default_policy/sub_2:0' shape=(?,) dtype=float32>, 'vf_preds': <tf.Tensor 'default_policy/Reshape:0' shape=(?,) dtype=float32>, 'behaviour_logits': <tf.Tensor 'default_policy/model/fc_out/BiasAdd:0' shape=(?, 4) dtype=float32>}], feed_dict={<tf.Tensor 'default_policy/observation:0' shape=(?, 129) dtype=float32>: [array([3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607340e-01, 3.62607340e-01, 3.62607340e-01, 3.62607340e-01,
        3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607287e-01, 3.62607273e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607340e-01, 3.33613052e-01, 3.62607340e-01, 3.62607340e-01,
        3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607287e-01,
        3.62607287e-01, 3.62607287e-01, 3.62607287e-01, 3.62607340e-01,
        3.62607340e-01, 3.62607340e-01, 3.62607340e-01, 3.33333333e-01,
        9.43940256e-05, 1.20095672e-04, 9.43940256e-05, 9.43940256e-05,
        9.30966234e-02, 9.31132363e-02, 9.31395871e-02, 9.31451387e-02,
        1.86117317e-01, 1.86149407e-01, 1.86156975e-01, 1.86150932e-01,
        2.79174202e-01, 2.79149417e-01, 2.79173169e-01, 2.79159042e-01,
        3.72200282e-01, 3.72188549e-01, 3.72195222e-01, 3.72175878e-01,
        4.65219661e-01, 4.65186232e-01, 4.65201644e-01, 4.65181475e-01,
        5.58237048e-01, 5.58225759e-01, 5.58259103e-01, 5.58234339e-01,
        6.51255705e-01, 6.51230188e-01, 6.51253134e-01, 6.51250062e-01,
        7.44284794e-01, 7.44298284e-01, 7.44270765e-01, 7.44280813e-01,
        8.37336632e-01, 8.37314828e-01, 8.37329671e-01, 8.37286100e-01,
        9.30318294e-01, 9.30349433e-01, 9.30293162e-01, 7.50051750e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01,
        0.00000000e+00, 2.50000000e-01, 5.00000000e-01, 7.50000000e-01])], <tf.Tensor 'default_policy/action:0' shape=(?, 2) dtype=float32>: [array([-0.8724061, -0.5400728], dtype=float32)], <tf.Tensor 'default_policy/prev_reward:0' shape=(?,) dtype=float32>: [1.0425041000281703], <tf.Tensor 'default_policy/PlaceholderWithDefault:0' shape=() dtype=bool>: False}
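The inner ValueError is the key line: the rollout worker fed an observation of shape (1, 132) into the policy's input placeholder, which was built as (?, 129) from the observation_space the environment declared. In other words, get_state() is returning more entries than observation_space promises, which happens when the number of vehicles in the network changes at runtime. A quick way to confirm the mismatch, as a sketch (assuming `flow_params` is the dict you already pass to your experiment; adapt the names to your setup):

```python
# Sanity-check sketch (not from the issue): compare the declared
# observation_space against the state the env actually returns.
import numpy as np
from flow.utils.registry import make_create_env

# `flow_params` is assumed to be your existing Flow experiment dict.
create_env, env_name = make_create_env(params=flow_params, version=0)
env = create_env()

obs = np.asarray(env.reset())
print("declared:", env.observation_space.shape)  # e.g. (129,)
print("returned:", obs.shape)                    # e.g. (132,) -> mismatch
```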
When I add many vehicles, this ValueError occurs, especially when I use a network other than RingNetwork. Sometimes, when the number of vehicles is not too high and the network is long, the error does not appear. I do not know which part of my code causes it. What code should I modify to prevent the error? Could anyone give me some advice?
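Since the placeholder shape is fixed when the policy graph is built, the usual fix is to make the observation length independent of the current vehicle count: declare observation_space from the maximum number of vehicles the network can contain, and pad (or clip) the state to that length. Below is a minimal sketch of the idea; the class name, MAX_VEHICLES, and the per-vehicle features are assumptions to adapt to your environment, and on older Flow versions the kernel is `self.k.scenario` rather than `self.k.network`.

```python
import numpy as np
from gym.spaces import Box
from flow.envs import Env  # Flow's base environment class

MAX_VEHICLES = 50     # assumption: upper bound on vehicles in the network
FEATURES_PER_VEH = 3  # assumption: speed, position, lane per vehicle

class FixedSizeObsEnv(Env):  # hypothetical subclass name
    @property
    def observation_space(self):
        # Size the space from the *maximum* vehicle count, not the current
        # one, so the TF placeholder shape never changes between steps.
        return Box(low=-np.inf, high=np.inf,
                   shape=(MAX_VEHICLES * FEATURES_PER_VEH,),
                   dtype=np.float32)

    def get_state(self):
        feats = []
        for veh_id in self.k.vehicle.get_ids():
            feats.extend([
                self.k.vehicle.get_speed(veh_id) / self.k.network.max_speed(),
                self.k.vehicle.get_x_by_id(veh_id) / self.k.network.length(),
                self.k.vehicle.get_lane(veh_id),
            ])
        obs = np.asarray(feats, dtype=np.float32)
        target = MAX_VEHICLES * FEATURES_PER_VEH
        if obs.size < target:
            # Pad with zeros when fewer vehicles are present...
            obs = np.concatenate(
                [obs, np.zeros(target - obs.size, dtype=np.float32)])
        # ...and clip any overflow when more vehicles enter than expected.
        return obs[:target]
```

Alternatively, if you cannot bound the vehicle count, cap the inflows (or observe only a fixed subset of vehicles) so the state never outgrows the declared space. Either way, observation_space.shape and len(get_state()) must agree at every step.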