Remove unnecessary use of comprehension #5

Status: Open
Wants to merge 1 commit into base: master
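
The whole commit applies a single pattern: comprehensions that merely copy their iterable are replaced by the equivalent constructor call (list(), dict(), set()). A minimal sketch of the equivalence, using illustrative names (labels, values) that are not taken from the code base:

    # Illustrative data only; not from ema_workbench.
    labels = ["a", "b", "c"]
    values = [1, 2, 3]

    # Identity comprehensions and their constructor equivalents.
    assert [x for x in range(1, 4)] == list(range(1, 4))
    assert {k: v for k, v in zip(labels, values)} == dict(zip(labels, values))
    assert {x for x in labels} == set(labels)

The constructor form reads as a direct statement of intent and avoids the per-item loop the comprehension spells out, which also makes it marginally faster on large inputs.
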
6 changes: 3 additions & 3 deletions ema_workbench/analysis/dimensional_stacking.py
@@ -61,7 +61,7 @@ def discretize(data, nbins=3, with_labels=False):
     n_unique = column_data.unique().shape[0]
     n = n_unique
     column_data = column_data.cat.rename_categories(
-        [x for x in range(1, n + 1)]
+        list(range(1, n + 1))
     )
     indices = column_data

@@ -414,8 +414,8 @@ def create_pivot_plot(
     n = nr_levels * 2

     scores = scores.index.tolist()
-    rows = [entry for entry in scores[0:n:2]]
-    columns = [entry for entry in scores[1:n:2]]
+    rows = list(scores[0:n:2])
+    columns = list(scores[1:n:2])

     discretized_x = discretize(x, nbins=nbins, with_labels=bin_labels)

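In create_pivot_plot, slicing a list already returns a new list, so list(scores[0:n:2]) is simply a copy of the even-indexed entries. A sketch with made-up uncertainty names standing in for the sorted index:

    # Hypothetical feature names; the real list comes from scores.index.tolist().
    scores = ["u1", "u2", "u3", "u4", "u5", "u6"]
    nr_levels = 2
    n = nr_levels * 2

    rows = list(scores[0:n:2])     # even positions -> ["u1", "u3"]
    columns = list(scores[1:n:2])  # odd positions  -> ["u2", "u4"]

    assert rows == [entry for entry in scores[0:n:2]]
    assert columns == [entry for entry in scores[1:n:2]]

Since the slice is itself a fresh list, the outer list() call (like the comprehension it replaces) only adds an extra copy; dropping it entirely would be a further, separate simplification.
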
6 changes: 3 additions & 3 deletions ema_workbench/analysis/plotting_util.py
@@ -222,7 +222,7 @@ def plot_violinplot(ax, values, log, group_labels=None):
     if not group_labels:
         group_labels = [""]

-    data = pd.DataFrame.from_records({k: v for k, v in zip(group_labels, values)})
+    data = pd.DataFrame.from_records(dict(zip(group_labels, values)))
     data = pd.melt(data)

     sns.violinplot(x="variable", y="value", data=data, order=group_labels, ax=ax)
@@ -246,7 +246,7 @@ def plot_boxenplot(ax, values, log, group_labels=None):
     if not group_labels:
         group_labels = [""]

-    data = pd.DataFrame.from_records({k: v for k, v in zip(group_labels, values)})
+    data = pd.DataFrame.from_records(dict(zip(group_labels, values)))
     data = pd.melt(data)

     sns.boxenplot(x="variable", y="value", data=data, order=group_labels, ax=ax)
@@ -771,7 +771,7 @@ def prepare_data(
     if filter_scalar:
         outcomes = filter_scalar_outcomes(outcomes)
     if not outcomes_to_show:
-        outcomes_to_show = [o for o in outcomes.keys()]
+        outcomes_to_show = list(outcomes.keys())

     # group the data if desired
     if group_by:
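
Both plotting helpers build a label-to-values mapping before melting it, and dict(zip(...)) produces exactly the mapping the comprehension did. A sketch with invented group labels (the pandas and seaborn calls are left out so it runs standalone):

    # Invented labels and value lists; the real ones come from the grouped outcomes.
    group_labels = ["policy A", "policy B"]
    values = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]

    mapping = dict(zip(group_labels, values))
    assert mapping == {k: v for k, v in zip(group_labels, values)}

zip() stops at the shorter of its inputs in both spellings, so the behaviour of plot_violinplot and plot_boxenplot is unchanged.
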
4 changes: 2 additions & 2 deletions ema_workbench/analysis/scenario_discovery_util.py
@@ -68,7 +68,7 @@ def _get_sorted_box_lims(boxes, box_init):
     # sort the uncertainties based on the normalized size of the
     # restricted dimensions
     uncs = uncs[np.argsort(box_size)]
-    box_lims = [box for box in boxes]
+    box_lims = list(boxes)

     return box_lims, uncs.tolist()

@@ -454,7 +454,7 @@ def _setup_figure(uncs):
     ax.add_patch(rect)
     ax.set_xlim(left=-0.2, right=1.2)
     ax.set_ylim(top=-0.5, bottom=nr_unc - 0.5)
-    ax.yaxis.set_ticks([y for y in range(nr_unc)])
+    ax.yaxis.set_ticks(list(range(nr_unc)))
     ax.xaxis.set_ticks([0, 0.25, 0.5, 0.75, 1])
     ax.set_yticklabels(uncs[::-1])
     return fig, ax
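
In _get_sorted_box_lims, the comprehension only made a shallow copy of the boxes sequence, and list() does the same. A sketch using plain dicts as stand-ins for the box-limit DataFrames:

    # Stand-ins for box limits; any objects work for the copy semantics shown here.
    boxes = [{"x": (0, 1)}, {"x": (0, 0.5)}]

    box_lims = list(boxes)
    assert box_lims == [box for box in boxes]
    assert box_lims is not boxes       # a new outer list ...
    assert box_lims[0] is boxes[0]     # ... sharing the same elements (shallow copy)
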
4 changes: 2 additions & 2 deletions ema_workbench/em_framework/parameters.py
@@ -384,7 +384,7 @@ def __init__(
         self._categories = NamedObjectMap(Category)

         self.categories = cats
-        self.resolution = [i for i in range(len(self.categories))]
+        self.resolution = list(range(len(self.categories)))
         self.multivalue = multivalue

     def index_for_cat(self, category):
@@ -488,7 +488,7 @@ def parameters_to_csv(parameters, file_name):
         else:
             values = param.lower_bound, param.upper_bound

-        dict_repr = {j: value for j, value in enumerate(values)}
+        dict_repr = dict(enumerate(values))
         dict_repr["name"] = param.name

         params[i] = dict_repr
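
dict(enumerate(values)) builds the same index-keyed row that parameters_to_csv previously built with a comprehension. A sketch with made-up bounds and a hypothetical parameter name:

    # Made-up lower and upper bounds for one parameter.
    values = (0.1, 4.5)

    dict_repr = dict(enumerate(values))
    assert dict_repr == {j: value for j, value in enumerate(values)}

    dict_repr["name"] = "some_parameter"   # hypothetical name, as done in parameters_to_csv
    # -> {0: 0.1, 1: 4.5, "name": "some_parameter"}
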
2 changes: 1 addition & 1 deletion ema_workbench/em_framework/points.py
@@ -163,7 +163,7 @@ def combine_cases_sampling(*point_collection):

     # figure out the longest
     def exhaust_cases(cases):
-        return [case for case in cases]
+        return list(cases)

     point_collection = [exhaust_cases(case) for case in point_collection]
     longest_cases = max(point_collection, key=len)
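
exhaust_cases exists to drain a possibly lazy iterator into a list, and list() does exactly what the comprehension did. A sketch with a hypothetical generator in place of the real case collections:

    def make_cases():
        # Hypothetical lazy source of cases; the real ones are Point objects.
        yield {"a": 1}
        yield {"a": 2}

    def exhaust_cases(cases):
        return list(cases)   # materialises the iterator, same as [case for case in cases]

    cases = exhaust_cases(make_cases())
    assert cases == [{"a": 1}, {"a": 2}]
    assert len(cases) == 2   # len() now works, which max(point_collection, key=len) relies on
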
2 changes: 1 addition & 1 deletion ema_workbench/examples/eijgenraam_example.py
@@ -112,7 +112,7 @@
     52: (49.2200, 1.6075, 0.0047, 0.036173, 0.304, 0.001716, 4025.6, 0.00171, 1 / 1250),
     53: (69.4565, 1.1625, 0.0028, 0.031651, 0.336, 0.002700, 9819.5, 0.00171, 1 / 1250),
 }
-data = {i: {k: v for k, v in zip(params, raw_data[i])} for i in raw_data.keys()}
+data = {i: dict(zip(params, raw_data[i])) for i in raw_data.keys()}

 # Set the ring we are analyzing
 ring = 15
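
The example keeps the outer dict comprehension and only swaps the inner one. A sketch with a truncated stand-in for params and two fake rows (the real table has nine columns per dike ring):

    # Truncated stand-ins for the real params tuple and raw_data table.
    params = ("c", "b", "lam")
    raw_data = {52: (49.2200, 1.6075, 0.0047), 53: (69.4565, 1.1625, 0.0028)}

    data = {i: dict(zip(params, raw_data[i])) for i in raw_data.keys()}
    assert data[52] == {"c": 49.2200, "b": 1.6075, "lam": 0.0047}

    # Iterating over items() would avoid the repeated key lookup, but that is a
    # separate cleanup from this commit.
    data_alt = {i: dict(zip(params, row)) for i, row in raw_data.items()}
    assert data_alt == data
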
2 changes: 1 addition & 1 deletion test/test_em_framework/test_callback.py
@@ -87,7 +87,7 @@ def test_store_results(self):

         _, out = callback.get_results()

-        self.assertIn(outcomes[0].name, {entry for entry in out.keys()})
+        self.assertIn(outcomes[0].name, set(out.keys()))
         self.assertEqual(out[outcomes[0].name].shape, (3,))

         # case 2 time series shape = (1, nr_time_steps)
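
The set comprehension over out.keys() only existed to give assertIn something to search; set(out.keys()) is equivalent, and membership in the dict itself would be simpler still. A sketch with a fake results mapping:

    # Fake results mapping, standing in for the callback output.
    out = {"outcome_a": [1, 2, 3], "outcome_b": [4, 5, 6]}

    assert set(out.keys()) == {entry for entry in out.keys()}

    # For a pure membership check the dict itself already suffices:
    assert "outcome_a" in out
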
6 changes: 3 additions & 3 deletions test/test_em_framework/test_points.py
@@ -13,15 +13,15 @@ def test_experiment_gemerator(self):
         experiments = points.experiment_generator(
             scenarios, model_structures, policies, combine="factorial"
         )
-        experiments = [e for e in experiments]
+        experiments = list(experiments)
         self.assertEqual(
             len(experiments), 6, ("wrong number of experiments " "for factorial")
         )

         experiments = points.experiment_generator(
             scenarios, model_structures, policies, combine="sample"
         )
-        experiments = [e for e in experiments]
+        experiments = list(experiments)
         self.assertEqual(
             len(experiments), 3, ("wrong number of experiments " "for zipover")
         )
@@ -30,7 +30,7 @@ def test_experiment_gemerator(self):
         experiments = points.experiment_generator(
             scenarios, model_structures, policies, combine="adf"
         )
-        _ = [e for e in experiments]
+        _ = list(experiments)

     # def test_experiment_generator(self):
     # sampler = LHSSampler()
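
experiment_generator returns a generator, so the tests materialise it before counting; list() keeps that behaviour. A sketch with a hypothetical generator in place of points.experiment_generator:

    def fake_experiment_generator(n):
        # Hypothetical stand-in for points.experiment_generator.
        for i in range(n):
            yield {"experiment": i}

    experiments = fake_experiment_generator(6)
    experiments = list(experiments)   # same as [e for e in experiments]
    assert len(experiments) == 6

    # A generator has no len(), so it must be materialised once before the
    # assertEqual on len(experiments); a second list() on the same generator
    # object would be empty, because it is already exhausted.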