init cleanup
This commit is contained in:
parent
deeb335eca
commit
25f7346c5d
Binary file not shown.
|
|
@ -1 +0,0 @@
|
||||||
Makes folder visible for git.
|
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1 +0,0 @@
|
||||||
Makes folder visible for git.
|
|
||||||
|
|
@ -1,47 +0,0 @@
|
||||||
import os
from json import dump
from time import sleep, time

import syslab
|
|
||||||
|
|
||||||
# Defines location and name of the measurements file.
# One file per run, stamped with the UNIX start time in whole seconds.
LOG_FILE = f'data/measurements/measurements_{time():.00f}.json'

# Create the output directory if it is missing, so the append-open in the
# logging loop below cannot fail with FileNotFoundError on a fresh checkout.
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)

print(f"Logging to file {LOG_FILE}")

# Set up a connection to each switchboard we measure from.
sb3192 = syslab.SwitchBoard('319-2')
sb3193 = syslab.SwitchBoard('319-3')
sb33012 = syslab.SwitchBoard("330-12")
sb1172 = syslab.SwitchBoard('117-2')
||||||
# Convenience function to read one snapshot of all logged power quantities.
def take_measurements():
    """Poll every configured switchboard channel once.

    Returns a list of records, one per measured quantity, each of the form
    {'unit': <name>, 'value': <measured value>, 'time': <unix seconds>}.
    """
    # (record-name prefix, switchboard, channel) for every quantity we log.
    # Each channel is read twice: active power ("_p") then reactive ("_q").
    channels = [
        ('pcc', sb3192, 'Grid'),
        ('pv319', sb3192, 'PV'),
        ('dumpload', sb3192, 'Dumpload'),
        ('gaia', sb33012, 'Gaia'),
        ('pv330', sb33012, 'PV_1'),
        ('b2b', sb3193, 'ABB_Sec'),
        ('battery', sb1172, 'Battery'),
    ]
    records = []
    for prefix, board, channel in channels:
        for suffix, reader in (('_p', board.getActivePower),
                               ('_q', board.getReactivePower)):
            meas = reader(channel)
            records.append({
                'unit': prefix + suffix,
                'value': meas.value,
                # timestampMicros is microseconds since the epoch -> seconds
                'time': meas.timestampMicros / 1e6,
            })
    return records
|
|
||||||
|
|
||||||
|
|
||||||
# Main loop: sample all channels once per second and append them to the
# log file as JSON Lines (one JSON object per line).
while True:
    measurement = take_measurements()

    # Open the output file in "append" mode which adds lines to the end
    with open(LOG_FILE, 'a') as file:
        for m in measurement:
            # Convert the dictionary m to a json string and put it
            # in the file.
            dump(m, file)
            # Write a newline for each measurement to make loading easier
            # (each line can then be parsed independently with json.loads)
            file.write('\n')
    sleep(1)
|
|
||||||
139
demo_plotter.py
139
demo_plotter.py
|
|
@ -1,139 +0,0 @@
|
||||||
import pandas as pd
|
|
||||||
import json
|
|
||||||
import matplotlib.pyplot as plt
|
|
||||||
import os
|
|
||||||
from datetime import timedelta
|
|
||||||
|
|
||||||
## Read the measurements data file ##
# Build the path with os.path.join: the previous hard-coded backslash
# ('data\measurements') only worked on Windows and left a literal
# backslash in the string on other platforms.
DATA_MEAS_DIR = os.path.join('data', 'measurements')
# Always plot latest datafile - replace [-1] with another index if you want to plot a specific file.
MEAS_LOG_FILE = sorted(os.listdir(DATA_MEAS_DIR))[-1]

# Store each dictionary of the measurements json in a list
# (the log file is JSON Lines: one JSON object per line)
with open(os.path.join(DATA_MEAS_DIR, MEAS_LOG_FILE)) as f:
    meas_data = [json.loads(line) for line in f]
||||||
|
|
||||||
# Use setpoint logger (only necessary for part two of the exercise "collecting fresh data")
use_setpoint_log = False


## Read the setpoints data file ##
if use_setpoint_log:
    # Portable path join; the previous 'data\setpoints' (backslash
    # separator) only worked on Windows.
    DATA_SP_DIR = os.path.join('data', 'setpoints')
    # Always plot latest datafile
    SP_LOG_FILE = sorted(os.listdir(DATA_SP_DIR))[-1]

    # Store each dictionary of the setpoints json in a list
    with open(os.path.join(DATA_SP_DIR, SP_LOG_FILE)) as f:
        sp_data = [json.loads(line) for line in f]

    # Merge measurements and setpoints in one list
    data = meas_data + sp_data

else:
    data = meas_data
||||||
|
|
||||||
|
|
||||||
# Construct a dataframe and pivot it to obtain a dataframe with a column
# per unit, and a row per timestamp.
df = pd.DataFrame.from_records(data)
df_pivot = df.pivot_table(values='value', columns='unit', index='time')


# Plot the data. Note, that the data will mostly not be plotted with lines.
plt.ion()  # Turn interactive mode on
plt.figure()
ax1 = plt.subplot(211)  # Make two separate figures
ax2 = plt.subplot(212)
# Active power ("_p" columns) in the top axes, reactive ("_q") below.
p_cols = [col for col in df_pivot.columns if "_p" in col]
q_cols = [col for col in df_pivot.columns if "_q" in col]
df_pivot[p_cols].plot(marker='.', ax=ax1, linewidth=3)
df_pivot[q_cols].plot(marker='.', ax=ax2, linewidth=3)
plt.show(block=True)
|
|
||||||
|
|
||||||
|
|
||||||
## TODO Q1: Your code here
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## TODO Q2:
|
|
||||||
# Convert time column (index) of df_pivot to datetime
|
|
||||||
# TODO Your code here
|
|
||||||
# Hint1: You can use pandas to_numeric() to prepare the index for pandas to_datetime function
|
|
||||||
# Hint2: Remember to define the unit within pandas to_datetime function
|
|
||||||
|
|
||||||
# Resample the data
|
|
||||||
# TODO Your code here
|
|
||||||
|
|
||||||
|
|
||||||
# Interpolate the measurements
|
|
||||||
# TODO Your code here
|
|
||||||
# Hint: For part two of the exercise ("collecting fresh data") the nan rows after a setpoint
|
|
||||||
# in the recorded step function should be filled with the value of the setpoint until the row of the next setpoint is reached
|
|
||||||
# You can use the df.fillna(method="ffill") function for that purpose. However, the measurements should still be interpolated!
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Plot the resampled data
|
|
||||||
# TODO Your code here
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## TODO Q3: Your code here
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## TODO Q4: Your code here
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Part two: "Collecting fresh data"
|
|
||||||
|
|
||||||
# Hint 1: You can build up on the "read_and_plot_data.py" from day 2
|
|
||||||
# Hint 2: You may want to store your response metric functions from day 2 in the "util.py" and import all of them with
|
|
||||||
# "from util import *"
|
|
||||||
|
|
||||||
# Exercise template: analyse the recorded step responses. Several TODOs
# below must be filled in (T_1/T_2 extraction, y/r definition, the step
# direction test, the metric entries) before this branch can run.
if use_setpoint_log:

    # Add a column to df_pivot containing the reference/target signal
    # TODO your code here

    # Loop over all steps and extract T_1, T_2 and the step size
    results = {}  # metrics per load step, keyed by the step label below

    for idx in range(0, len(sp_data)-1):
        # Label each step by its commanded power, e.g. "Step_5kW"
        label = f"Step_{sp_data[idx]['value']}kW"

        # Extract T_1 and T_2 from the setpoint JSON
        # TODO your code here
        # NOTE(review): T_1 and T_2 are used below but never assigned --
        # the TODO above must define them or this raises NameError.

        # Change timestamp format
        # (setpoint times are unix seconds; rounded to 100 ms)
        T_1 = pd.to_datetime(pd.to_numeric(T_1), unit="s").round("0.1S")
        T_2 = pd.to_datetime(pd.to_numeric(T_2), unit="s").round("0.1S")

        # To ensure we are not considering values of the next load step
        T_2 = T_2 - timedelta(seconds=0.2)


        # define measured output y and target setpoint r
        # TODO your code here

        # Derive step direction from the setpoint data
        # NOTE(review): the `...` placeholder (Ellipsis) is truthy as
        # written, so this always takes the True branch until replaced.
        if ...: # TODO your code here
            Positive_step = True
        else:
            Positive_step = False

        # Collect response metrics results
        results[label] = {
            # TODO your code here
        }

    # Bar chart comparing the response metrics across all steps,
    # saved next to the data and tagged with the measurement file suffix.
    pd.DataFrame.from_dict(results).plot(kind='bar')
    plt.title("Metrics")
    plt.tight_layout()
    plt.savefig('data/test_metrics'+MEAS_LOG_FILE[-10:]+'.png')
    plt.show(block=True)
|
|
||||||
|
|
||||||
|
|
||||||
Loading…
Reference in New Issue