Skip to content

More plotly results annotations #2375

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 6 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 52 additions & 17 deletions pandapower/plotting/plotly/pf_res_plotly.py
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Power is normally measured in MW and current in kA. Voltage is given in p.u. (dimensionless), which can be converted to kV by multiplying res_bus.vm_pu with bus.vn_kv. So a binary switch that toggles between p.u. and rated voltage would suffice.

Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
def pf_res_plotly(net, cmap="Jet", use_line_geo=None, on_map=False, projection=None,
map_style='basic', figsize=1, aspectratio='auto', line_width=2, bus_size=10,
climits_volt=(0.9, 1.1), climits_load=(0, 100), cpos_volt=1.0, cpos_load=1.1,
filename="temp-plot.html", auto_open=True):
filename="temp-plot.html", auto_open=True, power_unit="k", current_unit="", voltage_unit=""):
"""
Plots a pandapower network in plotly

Expand Down Expand Up @@ -75,12 +75,19 @@

**auto_open** (bool, True) - automatically open plot in browser

**power_unit** (str, 'k') - default unit of displayed P, Q, S data ["", "k", "M"]

**current_unit** (str, '') - default unit of displayed I data ["", "k"]

**voltage_unit** (str, '') - default unit of displayed V data ["", "k"]

OUTPUT:
**figure** (graph_objs._figure.Figure) figure object

"""
if 'res_bus' not in net or net.get('res_bus').shape[0] == 0:
logger.warning('There are no Power Flow results. A Newton-Raphson power flow will be executed.')
logger.warning(

Check warning on line 89 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L89

Added line #L89 was not covered by tests
'There are no Power Flow results. A Newton-Raphson power flow will be executed.')
runpp(net)

# create geocoord if none are available
Expand All @@ -89,7 +96,8 @@
" This may take some time")
create_generic_coordinates(net, respect_switches=True)
if on_map:
logger.warning("Map plots not available with artificial coordinates and will be disabled!")
logger.warning(

Check warning on line 99 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L99

Added line #L99 was not covered by tests
"Map plots not available with artificial coordinates and will be disabled!")
on_map = False

# for geo_type in ["bus_geodata", "line_geodata"]:
Expand All @@ -104,7 +112,6 @@
# "indices. That can cause troubles for draw_traces(): " + str(
# dupl_geo_idx))


# check if geodata are real geographical lat/lon coordinates using geopy
if on_map and projection is not None:
geo_data_to_latlong(net, projection=projection)
Expand All @@ -113,11 +120,25 @@
# initializating bus trace
# hoverinfo which contains name and pf results
precision = 3

if voltage_unit == "":
voltage_factor = 1e3

Check warning on line 125 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L124-L125

Added lines #L124 - L125 were not covered by tests
else:
voltage_factor = 1
if power_unit == "":
power_factor = 1e6
elif power_unit == "k":
power_factor = 1e3

Check warning on line 131 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L127-L131

Added lines #L127 - L131 were not covered by tests
else:
power_factor = 1

Check warning on line 133 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L133

Added line #L133 was not covered by tests

hoverinfo = (
net.bus.name.astype(str) + '<br />' +
'V_m = ' + net.res_bus.vm_pu.round(precision).astype(str) + ' pu' + '<br />' +
'V_m = ' + (net.res_bus.vm_pu * net.bus.vn_kv.round(2)).round(precision).astype(str) + ' kV' + '<br />' +
'V_a = ' + net.res_bus.va_degree.round(precision).astype(str) + ' deg').tolist()
net.bus.name.astype(str) + '<br />' +
'V_m = ' + net.res_bus.vm_pu.round(precision).astype(str) + ' pu' + '<br />' +
'V_m = ' + (net.res_bus.vm_pu * net.bus.vn_kv.round(2) * voltage_factor).round(precision).astype(str) + ' ' + voltage_unit + 'V' + '<br />' +
'V_a = ' + net.res_bus.va_degree.round(precision).astype(str) + ' deg' + '<br />' +
'P = ' + (net.res_bus.p_mw * power_factor).round(precision).astype(str) + ' ' + power_unit + 'W' + '<br />' +
'Q = ' + (net.res_bus.q_mvar * power_factor).round(precision).astype(str) + ' ' + power_unit + 'Var').tolist()
hoverinfo = pd.Series(index=net.bus.index, data=hoverinfo)
bus_trace = create_bus_trace(net, net.bus.index, size=bus_size, infofunc=hoverinfo, cmap=cmap,
cbar_title='Bus Voltage [pu]', cmin=climits_volt[0], cmax=climits_volt[1],
Expand All @@ -130,14 +151,24 @@
if use_line_geo is None:
use_line_geo = False if any(net.line.geo.isna()) else True
elif use_line_geo and any(net.line.geo.isna()):
logger.warning("No or insufficient line geodata available --> only bus geodata will be used.")
logger.warning(

Check warning on line 154 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L154

Added line #L154 was not covered by tests
"No or insufficient line geodata available --> only bus geodata will be used.")
use_line_geo = False
# hoverinfo which contains name and pf results
if current_unit == "":
current_factor = 1e3

Check warning on line 159 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L158-L159

Added lines #L158 - L159 were not covered by tests
else:
current_factor = 1

Check warning on line 161 in pandapower/plotting/plotly/pf_res_plotly.py

View check run for this annotation

Codecov / codecov/patch

pandapower/plotting/plotly/pf_res_plotly.py#L161

Added line #L161 was not covered by tests
hoverinfo = (
net.line.name.astype(str) + '<br />' +
'I = ' + net.res_line.loading_percent.round(precision).astype(str) + ' %' + '<br />' +
'I_from = ' + net.res_line.i_from_ka.round(precision).astype(str) + ' kA' + '<br />' +
'I_to = ' + net.res_line.i_to_ka.round(precision).astype(str) + ' kA' + '<br />').tolist()
net.line.name.astype(str) + '<br />' +
'I = ' + net.res_line.loading_percent.round(precision).astype(str) + ' %' + '<br />' +
'I_from = ' + (net.res_line.i_from_ka * current_factor).round(precision).astype(str) + ' ' + current_unit + 'A' + '<br />' +
'I_to = ' + (net.res_line.i_to_ka * current_factor).round(precision).astype(str) + ' ' + current_unit + 'A' + '<br />' +
'P_from = ' + (net.res_line.p_from_mw * power_factor).round(precision).astype(str) + ' ' + power_unit + 'W' + '<br />' +
'P_to = ' + (net.res_line.p_to_mw * power_factor).round(precision).astype(str) + ' ' + power_unit + 'W' + '<br />' +
'Q_from = ' + (net.res_line.q_from_mvar * power_factor).round(precision).astype(str) + ' ' + power_unit + 'Var' + '<br />' +
'Q_to = ' + (net.res_line.q_to_mvar * power_factor).round(precision).astype(str) + ' ' + power_unit + 'Var').tolist()

hoverinfo = pd.Series(index=net.line.index, data=hoverinfo)
line_traces = create_line_trace(net, use_line_geo=use_line_geo, respect_switches=True,
width=line_width,
Expand All @@ -152,10 +183,14 @@
# ----- Trafos ------
# hoverinfo which contains name and pf results
hoverinfo = (
net.trafo.name.astype(str) + '<br />' +
'I = ' + net.res_trafo.loading_percent.round(precision).astype(str) + ' %' + '<br />' +
'I_hv = ' + net.res_trafo.i_hv_ka.round(precision).astype(str) + ' kA' + '<br />' +
'I_lv = ' + net.res_trafo.i_lv_ka.round(precision).astype(str) + ' kA' + '<br />').tolist()
net.trafo.name.astype(str) + '<br />' +
'I = ' + net.res_trafo.loading_percent.round(precision).astype(str) + ' %' + '<br />' +
'I_hv = ' + (net.res_trafo.i_hv_ka * current_factor).round(precision).astype(str) + ' ' + current_unit + 'A' + '<br />' +
'I_lv = ' + (net.res_trafo.i_lv_ka * current_factor).round(precision).astype(str) + ' ' + current_unit + 'A' + '<br />' +
'P_hv = ' + (net.res_trafo.p_hv_mw * power_factor).round(precision).astype(str) + ' ' + power_unit + 'W' + '<br />' +
'P_lv = ' + (net.res_trafo.p_lv_mw * power_factor).round(precision).astype(str) + ' ' + power_unit + 'W' + '<br />' +
'Q_hv = ' + (net.res_trafo.q_hv_mvar * power_factor).round(precision).astype(str) + ' ' + power_unit + 'Var' + '<br />' +
'Q_lv = ' + (net.res_trafo.q_lv_mvar * power_factor).round(precision).astype(str) + ' ' + power_unit + 'Var' + '<br />').tolist()
hoverinfo = pd.Series(index=net.trafo.index, data=hoverinfo)
trafo_traces = create_trafo_trace(net, width=line_width * 1.5, infofunc=hoverinfo,
cmap=cmap_lines, cmin=0, cmax=100)
Expand Down
49 changes: 24 additions & 25 deletions pandapower/test/api/test_file_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,18 +65,18 @@ def net_in(request):


def test_pickle(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.p"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.p"))
pp.to_pickle(net_in, filename)
net_out = pp.from_pickle(filename)
    # pickle seems to change column types
assert_net_equal(net_in, net_out)


@pytest.mark.skipif(not xlsxwriter_INSTALLED or not openpyxl_INSTALLED, reason=(
"xlsxwriter is mandatory to write excel files and openpyxl to read excels, but is not installed."
"xlsxwriter is mandatory to write excel files and openpyxl to read excels, but is not installed."
))
def test_excel(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.xlsx"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.xlsx"))
pp.to_excel(net_in, filename)
net_out = pp.from_excel(filename)
assert_net_equal(net_in, net_out)
Expand All @@ -92,7 +92,7 @@ def test_excel(net_in, tmp_path):
@pytest.mark.skipif(not xlsxwriter_INSTALLED,
reason="xlsxwriter is mandatory to write excel files, but is not installed.")
def test_excel_controllers(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.xlsx"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.xlsx"))
pp.control.DiscreteTapControl(net_in, 0, 0.95, 1.05)
pp.to_excel(net_in, filename)
net_out = pp.from_excel(filename)
Expand All @@ -102,7 +102,7 @@ def test_excel_controllers(net_in, tmp_path):

def test_json_basic(net_in, tmp_path):
# tests the basic json functionality with the encoder/decoder classes
filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
with open(filename, 'w') as fp:
json.dump(net_in, fp, cls=PPJSONEncoder)

Expand All @@ -122,7 +122,7 @@ def test_json_controller_none():


def test_json(net_in, tmp_path):
filename = os.path.join(os.path.abspath(str(tmp_path)), "testfile.json")
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))

if GEOPANDAS_INSTALLED and SHAPELY_INSTALLED:
net_geo = copy.deepcopy(net_in)
Expand Down Expand Up @@ -154,7 +154,7 @@ def test_json(net_in, tmp_path):
@pytest.mark.skipif(not cryptography_INSTALLED, reason=("cryptography is mandatory to encrypt "
"json files, but is not installed."))
def test_encrypted_json(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
pp.to_json(net_in, filename, encryption_key="verysecret")
with pytest.raises(json.JSONDecodeError):
pp.from_json(filename)
Expand All @@ -165,15 +165,15 @@ def test_encrypted_json(net_in, tmp_path):


def test_type_casting_json(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
net_in.sn_kva = 1000
pp.to_json(net_in, filename)
net = pp.from_json(filename)
assert_net_equal(net_in, net)


def test_from_json_add_basic_std_types(tmp_path):
filename = os.path.abspath(str(tmp_path)) + r"\testfile_std_types.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile_std_types.json"))
# load older load network and change std-type
net = create_test_network2()
net.std_types["line"]['15-AL1/3-ST1A 0.4']["max_i_ka"] = 111
Expand All @@ -188,16 +188,16 @@ def test_from_json_add_basic_std_types(tmp_path):


@pytest.mark.xfail(reason="For std_types, some dtypes are not returned correctly by sql. Therefore,"
" a workaround test was created to check everything else.")
" a workaround test was created to check everything else.")
def test_sqlite(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.db"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.db"))
pp.to_sqlite(net_in, filename)
net_out = pp.from_sqlite(filename)
assert_net_equal(net_in, net_out)


def test_sqlite_workaround(net_in, tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.db"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.db"))
pp.to_sqlite(net_in, filename)
net_out = pp.from_sqlite(filename)
assert_net_equal(net_in, net_out, exclude_elms=["std_types"])
Expand All @@ -210,7 +210,7 @@ def test_convert_format(): # TODO what is this thing testing ?


def test_to_json_dtypes(tmp_path):
filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
net = create_test_network()
pp.runpp(net)
net['res_test'] = pd.DataFrame(columns=['test'], data=[1, 2, 3])
Expand Down Expand Up @@ -341,7 +341,7 @@ def test_json_io_same_net(net_in, tmp_path):
net1 = pp.from_json_string(s)
assert isinstance(net1.controller.object.at[0], control.ConstControl)

filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
pp.to_json(net_in, filename)
net2 = pp.from_json(filename)
assert isinstance(net2.controller.object.at[0], control.ConstControl)
Expand Down Expand Up @@ -376,7 +376,7 @@ def test_deepcopy_controller():

def test_elements_to_deserialize(tmp_path):
net = networks.mv_oberrhein()
filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
pp.to_json(net, filename)
net_select = pp.from_json(filename, elements_to_deserialize=['bus', 'load'])
for key, item in net_select.items():
Expand Down Expand Up @@ -406,7 +406,7 @@ def test_elements_to_deserialize(tmp_path):

def test_elements_to_deserialize_wo_keep(tmp_path):
net = networks.mv_oberrhein()
filename = os.path.abspath(str(tmp_path)) + "testfile.json"
filename = os.path.abspath(os.path.join(str(tmp_path), "testfile.json"))
pp.to_json(net, filename)
net_select = pp.from_json(filename, elements_to_deserialize=['bus', 'load'],
keep_serialized_elements=False)
Expand Down Expand Up @@ -473,7 +473,7 @@ def test_replace_elements_json_string(net_in):

net_load = pp.from_json_string(json_string,
replace_elements={r'pandapower.control.controller.const_control':
r'pandapower.test.api.input_files.test_control'})
r'pandapower.test.api.input_files.test_control'})
assert net_orig.controller.at[0, 'object'] == net_load.controller.at[0, 'object']
assert nets_equal(net_orig, net_load)
pp.runpp(net_load, run_control=True)
Expand All @@ -489,7 +489,7 @@ def test_json_generalized():
"df1": [('col1', np.dtype(object)),
('col2', 'f8'),],
"df2": [("col3", 'bool'),
("col4", "i8")]
("col4", "i8")]
})
general_net1 = copy.deepcopy(general_net0)
general_net1.df1.loc[0] = ["hey", 1.2]
Expand All @@ -513,7 +513,7 @@ def test_json_simple_index_type():
df4 = pd.DataFrame(s4)
df5, df6, df7, df8 = df1.T, df2.T, df3.T, df4.T
df9 = pd.DataFrame([[1, 2, 3], [4, 5, 7]], index=[1, "2"], columns=[4, "5", 6])
input = {key: val for key, val in zip("abcdefghijkl", [
input = {key: val for key, val in zip("abcdefghijkl", [
s1, s2, s3, s4, df1, df2, df3, df4, df5, df6, df7, df8, df9])}
json_str = pp.to_json(input)
output = pp.from_json_string(json_str, convert=False)
Expand All @@ -537,25 +537,24 @@ def test_json_index_names():


def test_json_multiindex_and_index_names():

# idx_tuples = tuple(zip(["a", "a", "b", "b"], ["bar", "baz", "foo", "qux"]))
idx_tuples = tuple(zip([1, 1, 2, 2], ["bar", "baz", "foo", "qux"]))
col_tuples = tuple(zip(["d", "d", "e"], ["bak", "baq", "fuu"]))
idx1 = pd.MultiIndex.from_tuples(idx_tuples)
idx2 = pd.MultiIndex.from_tuples(idx_tuples, names=[5, 6])
idx3 = pd.MultiIndex.from_tuples(idx_tuples, names=["fifth", "sixth"])
col1 = pd.MultiIndex.from_tuples(col_tuples)
col2 = pd.MultiIndex.from_tuples(col_tuples, names=[7, 8]) # ["7", "8"] is not possible since
col2 = pd.MultiIndex.from_tuples(col_tuples, names=[7, 8]) # ["7", "8"] is not possible since
# orient="columns" loses info whether index/column is an iteger or a string
col3 = pd.MultiIndex.from_tuples(col_tuples, names=[7, None])

for idx, col in zip([idx1, idx2, idx3], [col1, col2, col3]):
s_mi = pd.Series(range(4), index=idx)
df_mi = pd.DataFrame(np.arange(4*3).reshape((4, 3)), index=idx)
df_mc = pd.DataFrame(np.arange(4*3).reshape((4, 3)), columns=col)
df_mi_mc = pd.DataFrame(np.arange(4*3).reshape((4, 3)), index=idx, columns=col)
df_mi = pd.DataFrame(np.arange(4 * 3).reshape((4, 3)), index=idx)
df_mc = pd.DataFrame(np.arange(4 * 3).reshape((4, 3)), columns=col)
df_mi_mc = pd.DataFrame(np.arange(4 * 3).reshape((4, 3)), index=idx, columns=col)

input = {key: val for key, val in zip("abcd", [s_mi, df_mi, df_mc, df_mi_mc])}
input = {key: val for key, val in zip("abcd", [s_mi, df_mi, df_mc, df_mi_mc])}
json_str = pp.to_json(input)
output = pp.from_json_string(json_str, convert=False)
assert_series_equal(input["a"], output["a"], check_dtype=False)
Expand Down
Loading