Made projects deadline ready(?)
parent 4175d6b4ef
commit f0a5af501e
@@ -0,0 +1,760 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "a5b326e2",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"glob_path = '/opt/iui-datarelease2-sose2021/*/split_letters_csv/*'\n",
"\n",
"pickle_file = 'data.pickle'\n",
"\n",
"checkpoint_path = \"training_copy/cp.ckpt\"\n",
"checkpoint_dir = os.path.dirname(checkpoint_path)\n",
"\n",
"# divisor for the per-layer neuron count: hidden layer i gets ncount/dense_steps**i neurons,\n",
"# e.g. dense_steps = 3 with 900 flattened inputs: layer1=300, layer2=100, layer3=33, ...\n",
"dense_steps = 2\n",
"# number of dense/dropout layer pairs\n",
"layer_count = 3\n",
"# base dropout rate\n",
"drop_count = 0.1"
]
},
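{
"cell_type": "markdown",
"id": "c0ffee01",
"metadata": {},
"source": [
"A minimal sketch (added for illustration, not part of the original pipeline): how `dense_steps` and `layer_count` determine the hidden-layer sizes in `build_model` below. The input size of 1050 matches the flattened shape in the `model.summary()` output further down."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0ffee02",
"metadata": {},
"outputs": [],
"source": [
"# sketch: hidden-layer sizes as computed in build_model (ncount = flattened input size)\n",
"ncount = 1050  # 14 channels * 75 samples, per the model summary below\n",
"for i in range(1, layer_count + 1):\n",
"    neurons = int(ncount / pow(dense_steps, i))\n",
"    if neurons <= 52:  # build_model stops once a layer is no wider than the class count\n",
"        break\n",
"    print(f'Hidden_{i}: {neurons} neurons')\n",
"# expected: Hidden_1: 525, Hidden_2: 262, Hidden_3: 131"
]
},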
{
"cell_type": "code",
"execution_count": 2,
"id": "e834add0",
"metadata": {},
"outputs": [],
"source": [
"from glob import glob\n",
"import pandas as pd\n",
"from tqdm import tqdm\n",
"\n",
"def dl_from_blob(filename) -> list:\n",
"    all_data = []\n",
"    \n",
"    for path in tqdm(glob(filename)):\n",
"        df = pd.read_csv(path, sep=';')\n",
"        u = path.split('/')[3]  # user id from the directory layout\n",
"        l = ''.join(filter(lambda x: x.isalpha(), path.split('/')[5]))[0]  # letter label from the file name\n",
"        d = {\n",
"            'file': path,\n",
"            'data': df,\n",
"            'user': u,\n",
"            'label': l\n",
"        }\n",
"        all_data.append(d)\n",
"    return all_data"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ec9e9e4d",
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"def save_pickle(f, structure):\n",
"    with open(f, 'wb') as _p:\n",
"        pickle.dump(structure, _p)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "44338438",
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"def load_pickles(f) -> list:\n",
"    # read the pickled structure from the given path\n",
"    with open(f, 'rb') as _p:\n",
"        _d = pickle.load(_p)\n",
"    \n",
"    return _d"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2627c3c5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loading data...\n",
"data.pickle found...\n"
]
}
],
"source": [
"import os\n",
"def load_data() -> list:\n",
"    if os.path.isfile(pickle_file):\n",
"        print(f'{pickle_file} found...')\n",
"        return load_pickles(pickle_file)\n",
"    print(f'Didn\\'t find {pickle_file}...')\n",
"    all_data = dl_from_blob(glob_path)\n",
"    print(f'Creating {pickle_file}...')\n",
"    save_pickle(pickle_file, all_data)\n",
"    return all_data\n",
"\n",
"print(\"Loading data...\")\n",
"data = load_data()\n",
"# plot_pd(data[0]['data'], False)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "71e6d157",
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"def plot_pd(data, force=True):\n",
"    fig, axs = plt.subplots(5, 3, figsize=(3*3, 3*5))\n",
"    axs[0][0].plot(data['Acc1 X'])\n",
"    axs[0][1].plot(data['Acc1 Y'])\n",
"    axs[0][2].plot(data['Acc1 Z'])\n",
"    axs[1][0].plot(data['Acc2 X'])\n",
"    axs[1][1].plot(data['Acc2 Y'])\n",
"    axs[1][2].plot(data['Acc2 Z'])\n",
"    axs[2][0].plot(data['Gyro X'])\n",
"    axs[2][1].plot(data['Gyro Y'])\n",
"    axs[2][2].plot(data['Gyro Z'])\n",
"    axs[3][0].plot(data['Mag X'])\n",
"    axs[3][1].plot(data['Mag Y'])\n",
"    axs[3][2].plot(data['Mag Z'])\n",
"    axs[4][0].plot(data['Time'])\n",
"\n",
"    if force:\n",
"        for a in axs:\n",
"            for b in a:\n",
"                b.plot(data['Force'])\n",
"    else:\n",
"        axs[4][1].plot(data['Force'])\n",
"\n",
"def plot_np(data, force=True):\n",
"    fig, axs = plt.subplots(5, 3, figsize=(3*3, 3*5))\n",
"    axs[0][0].plot(data[0])\n",
"    axs[0][1].plot(data[1])\n",
"    axs[0][2].plot(data[2])\n",
"    axs[1][0].plot(data[3])\n",
"    axs[1][1].plot(data[4])\n",
"    axs[1][2].plot(data[5])\n",
"    axs[2][0].plot(data[6])\n",
"    axs[2][1].plot(data[7])\n",
"    axs[2][2].plot(data[8])\n",
"    axs[3][0].plot(data[9])\n",
"    axs[3][1].plot(data[10])\n",
"    axs[3][2].plot(data[11])\n",
"    axs[4][0].plot(data[13])\n",
"\n",
"    if force:\n",
"        for a in axs:\n",
"            for b in a:\n",
"                b.plot(data[12])\n",
"    else:\n",
"        axs[4][1].plot(data[12])\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "19c4c56e",
"metadata": {},
"outputs": [],
"source": [
"def mill_drop(entry):\n",
"    # drop the Millis column from a single entry\n",
"    data_wo_mill = entry['data'].drop(labels='Millis', axis=1, inplace=False)\n",
"    drop_entry = entry\n",
"    drop_entry['data'] = data_wo_mill.reset_index(drop=True)\n",
"    \n",
"    return drop_entry"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ea509043",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def cut_force(drop_entry):\n",
"    # trim each recording to the window where force is actually applied\n",
"    shorten_entry = drop_entry\n",
"    shorten_data = shorten_entry['data']\n",
"    sf_entry = shorten_data['Force']\n",
"    leeway = 10\n",
"    \n",
"    try:\n",
"        # absolute threshold first\n",
"        thresh = 70\n",
"        temps_over_T = np.where(sf_entry > thresh)[0]\n",
"        shorten_data = shorten_data[max(temps_over_T.min()-leeway, 0):min(len(sf_entry)-1, temps_over_T.max()+leeway)]\n",
"    except ValueError:\n",
"        # no sample exceeded 70: fall back to 5% of this entry's own maximum\n",
"        threshold = 0.05\n",
"        thresh = sf_entry.max()*threshold\n",
"        temps_over_T = np.where(sf_entry > thresh)[0]\n",
"        shorten_data = shorten_data[max(temps_over_T.min()-leeway, 0):min(len(sf_entry)-1, temps_over_T.max()+leeway)]\n",
"    \n",
"    shorten_entry['data'] = shorten_data.reset_index(drop=True)\n",
"    return shorten_entry"
]
},
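{
"cell_type": "markdown",
"id": "c0ffee03",
"metadata": {},
"source": [
"A small self-contained sketch of the windowing idea in `cut_force`, on made-up numbers: keep everything between the first and last sample above the threshold, plus some leeway."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0ffee04",
"metadata": {},
"outputs": [],
"source": [
"# illustrative only: synthetic force trace, not real data\n",
"force_demo = np.array([0, 1, 2, 80, 95, 90, 85, 3, 1, 0])\n",
"leeway_demo = 2\n",
"over = np.where(force_demo > 70)[0]                      # indices 3..6\n",
"lo = max(over.min() - leeway_demo, 0)                    # 1\n",
"hi = min(len(force_demo) - 1, over.max() + leeway_demo)  # 8\n",
"print(force_demo[lo:hi])                                 # [ 1  2 80 95 90 85  3]"
]
},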
{
"cell_type": "code",
"execution_count": 9,
"id": "7025983c",
"metadata": {},
"outputs": [],
"source": [
"def norm_force(shorten_entry, flist):\n",
"    # z-score the force signal against all recordings of the same user\n",
"    fnorm_entry = shorten_entry\n",
"    u = fnorm_entry['user']\n",
"    d = fnorm_entry['data']\n",
"    \n",
"    d['Force'] = ((d['Force'] - flist[u].mean())/flist[u].std())\n",
"    \n",
"    fnorm_entry['data'] = fnorm_entry['data'].reset_index(drop=True)\n",
"    return fnorm_entry"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "64860f57",
"metadata": {},
"outputs": [],
"source": [
"def time_trans(fnorm_entry):\n",
"    # shift timestamps so each recording starts at t = 0\n",
"    time_entry = fnorm_entry\n",
"    \n",
"    time_entry['data']['Time'] = fnorm_entry['data']['Time']-fnorm_entry['data']['Time'][0]\n",
"    \n",
"    time_entry['data'] = time_entry['data'].reset_index(drop=True)\n",
"\n",
"    return time_entry"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "0bb308a2",
"metadata": {},
"outputs": [],
"source": [
"def norm(time_entry):\n",
"    # scale raw integer sensor readings into roughly [-1, 1]\n",
"    # (32768 = signed 16-bit full scale; 8192 is assumed to be the full scale of the other sensors)\n",
"    norm_entry = time_entry\n",
"    \n",
"    norm_entry['data']['Acc1 X'] = norm_entry['data']['Acc1 X'] / 32768\n",
"    norm_entry['data']['Acc1 Y'] = norm_entry['data']['Acc1 Y'] / 32768\n",
"    norm_entry['data']['Acc1 Z'] = norm_entry['data']['Acc1 Z'] / 32768\n",
"    norm_entry['data']['Acc2 X'] = norm_entry['data']['Acc2 X'] / 8192\n",
"    norm_entry['data']['Acc2 Y'] = norm_entry['data']['Acc2 Y'] / 8192\n",
"    norm_entry['data']['Acc2 Z'] = norm_entry['data']['Acc2 Z'] / 8192\n",
"    norm_entry['data']['Gyro X'] = norm_entry['data']['Gyro X'] / 32768\n",
"    norm_entry['data']['Gyro Y'] = norm_entry['data']['Gyro Y'] / 32768\n",
"    norm_entry['data']['Gyro Z'] = norm_entry['data']['Gyro Z'] / 32768\n",
"    norm_entry['data']['Mag X'] = norm_entry['data']['Mag X'] / 8192\n",
"    norm_entry['data']['Mag Y'] = norm_entry['data']['Mag Y'] / 8192\n",
"    norm_entry['data']['Mag Z'] = norm_entry['data']['Mag Z'] / 8192\n",
"    \n",
"    norm_entry['data'] = norm_entry['data'].reset_index(drop=True)\n",
"    \n",
"    return norm_entry"
]
},
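{
"cell_type": "markdown",
"id": "c0ffee05",
"metadata": {},
"source": [
"A quick sanity check (illustrative only) that dividing by the full-scale value maps signed 16-bit extremes into [-1, 1):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0ffee06",
"metadata": {},
"outputs": [],
"source": [
"raw = np.array([-32768, 0, 32767], dtype=np.int16)\n",
"print(raw / 32768)  # approximately [-1.  0.  1.]"
]
},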
{
"cell_type": "code",
"execution_count": 12,
"id": "1171c8ef",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Preprocessing...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 26179/26179 [01:29<00:00, 292.14it/s]\n"
]
}
],
"source": [
"def preproc(d):\n",
"    # gather every user's force samples so norm_force can z-score per user\n",
"    flist = {}\n",
"    d_res = []\n",
"    for e in d:\n",
"        if e['user'] not in flist:\n",
"            flist[e['user']] = e['data']['Force']\n",
"        else:\n",
"            # Series.append was removed in pandas 2.x; use pd.concat there\n",
"            flist[e['user']] = flist[e['user']].append(e['data']['Force'])\n",
"    \n",
"    for e in tqdm(d):\n",
"        d_res.append(preproc_entry(e, flist))\n",
"    return d_res\n",
"    \n",
"def preproc_entry(entry, flist):\n",
"    drop_entry = mill_drop(entry)\n",
"# plot_pd(drop_entry['data'])\n",
"# \n",
"    shorten_entry = cut_force(drop_entry)\n",
"# plot_pd(shorten_entry['data'])\n",
"# \n",
"    fnorm_entry = norm_force(shorten_entry, flist)\n",
"# plot_pd(fnorm_entry['data'])\n",
"# \n",
"    time_entry = time_trans(fnorm_entry)\n",
"# plot_pd(time_entry['data'])\n",
"# \n",
"    norm_entry = norm(time_entry)\n",
"# plot_pd(norm_entry['data'], False)\n",
"    return norm_entry\n",
"\n",
"print(\"Preprocessing...\")\n",
"pdata = preproc(data)\n",
"# plot_pd(pdata[0]['data'], False)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "2d576b5d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Truncating...\n"
]
}
],
"source": [
"def throw(pdata):\n",
"    # discard entries longer than the threshold_p quantile of all lengths\n",
"    llist = pd.Series([len(x['data']) for x in pdata])\n",
"    threshold = int(llist.quantile(threshold_p))\n",
"    longdex = np.where(llist <= threshold)[0]\n",
"    return np.array(pdata)[longdex]\n",
"\n",
"llist = pd.Series([len(x['data']) for x in pdata])\n",
"threshold_p = 0.75\n",
"threshold = int(llist.quantile(threshold_p))  # also used later as the padding length\n",
"\n",
"print(\"Truncating...\")\n",
"tpdata = throw(pdata)\n",
"# plot_pd(tpdata[0]['data'], False)"
]
},
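{
"cell_type": "markdown",
"id": "c0ffee07",
"metadata": {},
"source": [
"Illustrative only: how the 0.75 quantile picks the length cutoff on a toy list of sequence lengths."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0ffee08",
"metadata": {},
"outputs": [],
"source": [
"lengths_demo = pd.Series([40, 50, 60, 70, 80, 90, 100, 400])\n",
"print(int(lengths_demo.quantile(0.75)))  # 92: entries longer than this (100, 400) get thrown away"
]
},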
{
"cell_type": "code",
"execution_count": 14,
"id": "b3eb709e",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"  9%|▉         | 1785/19640 [00:00<00:01, 17844.40it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Padding...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 19640/19640 [00:01<00:00, 18711.98it/s]\n"
]
}
],
"source": [
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"\n",
"def elong(tpdata):\n",
"    # transpose each frame to (channels, time) and pad/truncate every channel to `threshold` samples\n",
"    for x in tqdm(tpdata):\n",
"        y = x['data'].to_numpy().T\n",
"        x['data'] = pad_sequences(y, dtype=float, padding='post', maxlen=threshold)\n",
"    return tpdata\n",
"\n",
"print(\"Padding...\")\n",
"ltpdata = elong(tpdata)\n",
"# plot_np(ltpdata[0]['data'], False)"
]
},
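{
"cell_type": "markdown",
"id": "c0ffee09",
"metadata": {},
"source": [
"A toy sketch (made-up numbers) of what `pad_sequences` does to one (channels, time) frame here: `padding='post'` appends zeros to every channel until it is `maxlen` samples long; longer channels are truncated."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0ffee10",
"metadata": {},
"outputs": [],
"source": [
"frame_demo = np.array([[1.0, 2.0, 3.0],\n",
"                       [4.0, 5.0, 6.0]])  # 2 channels, 3 samples\n",
"print(pad_sequences(frame_demo, dtype=float, padding='post', maxlen=5))\n",
"# [[1. 2. 3. 0. 0.]\n",
"#  [4. 5. 6. 0. 0.]]"
]
},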
{
"cell_type": "code",
"execution_count": 15,
"id": "73a2a874",
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"from tensorflow.keras.regularizers import l2\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout\n",
"from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n",
"from tensorflow.keras.optimizers import Adam\n",
"\n",
"def build_model(shape, classes):\n",
"    model = Sequential()\n",
"    \n",
"    ncount = shape[0]*shape[1]  # flattened input size\n",
"    \n",
"    model.add(Flatten(input_shape=shape, name='flatten'))\n",
"    \n",
"    model.add(Dropout(drop_count, name=f'dropout_{drop_count*100}'))\n",
"    model.add(BatchNormalization(name='batchNorm'))\n",
"    \n",
"    # hidden layers shrink by a factor of dense_steps each; dropout grows linearly\n",
"    for i in range(1, layer_count+1):\n",
"        neurons = int(ncount/pow(dense_steps, i))\n",
"        if neurons <= classes:\n",
"            break\n",
"        model.add(Dropout(drop_count*i, name=f'HiddenDropout_{drop_count*i*100:.0f}'))\n",
"        model.add(Dense(neurons, activation='relu', \n",
"                        kernel_regularizer=l2(0.001), name=f'Hidden_{i}')\n",
"        )\n",
"    \n",
"    model.add(Dense(classes, activation='softmax', name='Output'))\n",
"    \n",
"    model.compile(\n",
"        optimizer=Adam(),\n",
"        loss=\"categorical_crossentropy\", \n",
"        metrics=[\"acc\"],\n",
"    )\n",
"    \n",
"    return model"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "8ae93baa",
"metadata": {},
"outputs": [],
"source": [
"checkpoint_file = './goat.weights'\n",
"\n",
"def train(X_train, y_train, X_test, y_test):\n",
"    model = build_model(X_train[0].shape, 52)\n",
"    \n",
"    model.summary()\n",
"    \n",
"    # save the best weights (by training loss) seen so far\n",
"    model_checkpoint = ModelCheckpoint(filepath=checkpoint_path, monitor='loss', \n",
"                                       save_best_only=True)\n",
"    \n",
"    history = model.fit(X_train, y_train, \n",
"                        epochs=30,\n",
"                        batch_size=256,\n",
"                        shuffle=True,\n",
"                        validation_data=(X_test, y_test),\n",
"                        verbose=2,\n",
"                        callbacks=[model_checkpoint]\n",
"    )\n",
"    \n",
"    # restore the best checkpoint before returning\n",
"    model.load_weights(checkpoint_path)\n",
"    print(\"Evaluate on test data\")\n",
"    return model, history"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "9668ef09",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# note: these take effect only if set before TensorFlow first initializes the GPU\n",
"os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'  # this is required\n",
"os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # '0' for GPU0, '1' for GPU1, '2' for GPU2; check \"gpustat\" in a terminal"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "0bd41ed5",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import LabelEncoder, LabelBinarizer\n",
"\n",
"X = np.array([x['data'] for x in ltpdata])\n",
"y = np.array([x['label'] for x in ltpdata])\n",
"\n",
"# one-hot encode the 52 letter labels\n",
"lb = LabelBinarizer()\n",
"y_tran = lb.fit_transform(y)\n",
"\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y_tran, test_size=0.2, random_state=177013)\n",
"\n",
"# no-op reshapes: the arrays already have shape (n, channels, time)\n",
"X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2])\n",
"X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2])\n",
"\n",
"train_shape = X_train[0].shape\n",
"classes = y_train[0].shape[0]"
]
},
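{
"cell_type": "markdown",
"id": "c0ffee11",
"metadata": {},
"source": [
"Illustrative only: `LabelBinarizer` maps each letter label to a one-hot row (classes are sorted, uppercase before lowercase), and `inverse_transform` maps one-hot rows or prediction vectors back to labels."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0ffee12",
"metadata": {},
"outputs": [],
"source": [
"lb_demo = LabelBinarizer()\n",
"onehot = lb_demo.fit_transform(np.array(['a', 'b', 'C', 'a']))\n",
"print(lb_demo.classes_)                       # ['C' 'a' 'b']\n",
"print(onehot[0])                              # [0 1 0]\n",
"print(lb_demo.inverse_transform(onehot[:1]))  # ['a']"
]
},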
{
"cell_type": "code",
"execution_count": 19,
"id": "2c25c41b",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential\"\n",
"_________________________________________________________________\n",
"Layer (type)                 Output Shape              Param #   \n",
"=================================================================\n",
"flatten (Flatten)            (None, 1050)              0         \n",
"_________________________________________________________________\n",
"dropout_10.0 (Dropout)       (None, 1050)              0         \n",
"_________________________________________________________________\n",
"batchNorm (BatchNormalizatio (None, 1050)              4200      \n",
"_________________________________________________________________\n",
"HiddenDropout_10 (Dropout)   (None, 1050)              0         \n",
"_________________________________________________________________\n",
"Hidden_1 (Dense)             (None, 525)               551775    \n",
"_________________________________________________________________\n",
"HiddenDropout_20 (Dropout)   (None, 525)               0         \n",
"_________________________________________________________________\n",
"Hidden_2 (Dense)             (None, 262)               137812    \n",
"_________________________________________________________________\n",
"HiddenDropout_30 (Dropout)   (None, 262)               0         \n",
"_________________________________________________________________\n",
"Hidden_3 (Dense)             (None, 131)               34453     \n",
"_________________________________________________________________\n",
"Output (Dense)               (None, 52)                6864      \n",
"=================================================================\n",
"Total params: 735,104\n",
"Trainable params: 733,004\n",
"Non-trainable params: 2,100\n",
"_________________________________________________________________\n",
"Epoch 1/30\n",
"62/62 - 2s - loss: 4.8396 - acc: 0.0671 - val_loss: 4.7298 - val_acc: 0.0710\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 2/30\n",
"62/62 - 0s - loss: 4.0757 - acc: 0.1609 - val_loss: 4.2353 - val_acc: 0.1031\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 3/30\n",
"62/62 - 0s - loss: 3.5292 - acc: 0.2483 - val_loss: 3.9189 - val_acc: 0.1349\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 4/30\n",
"62/62 - 0s - loss: 3.1635 - acc: 0.3097 - val_loss: 3.5697 - val_acc: 0.2070\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 5/30\n",
"62/62 - 0s - loss: 2.8876 - acc: 0.3607 - val_loss: 3.3103 - val_acc: 0.2487\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 6/30\n",
"62/62 - 0s - loss: 2.6724 - acc: 0.4022 - val_loss: 3.0531 - val_acc: 0.3032\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 7/30\n",
"62/62 - 0s - loss: 2.5206 - acc: 0.4299 - val_loss: 2.8832 - val_acc: 0.3450\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 8/30\n",
"62/62 - 0s - loss: 2.3844 - acc: 0.4576 - val_loss: 2.5853 - val_acc: 0.4234\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 9/30\n",
"62/62 - 0s - loss: 2.2780 - acc: 0.4808 - val_loss: 2.3759 - val_acc: 0.4672\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 10/30\n",
"62/62 - 0s - loss: 2.2042 - acc: 0.4960 - val_loss: 2.2155 - val_acc: 0.5005\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 11/30\n",
"62/62 - 0s - loss: 2.1139 - acc: 0.5190 - val_loss: 2.0585 - val_acc: 0.5425\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 12/30\n",
"62/62 - 0s - loss: 2.0391 - acc: 0.5350 - val_loss: 1.9542 - val_acc: 0.5687\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 13/30\n",
"62/62 - 0s - loss: 1.9897 - acc: 0.5411 - val_loss: 1.9089 - val_acc: 0.5759\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 14/30\n",
"62/62 - 0s - loss: 1.9307 - acc: 0.5551 - val_loss: 1.8783 - val_acc: 0.5832\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 15/30\n",
"62/62 - 0s - loss: 1.8869 - acc: 0.5673 - val_loss: 1.8283 - val_acc: 0.5942\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 16/30\n",
"62/62 - 0s - loss: 1.8516 - acc: 0.5720 - val_loss: 1.7902 - val_acc: 0.5965\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 17/30\n",
"62/62 - 0s - loss: 1.8156 - acc: 0.5860 - val_loss: 1.7896 - val_acc: 0.5894\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 18/30\n",
"62/62 - 0s - loss: 1.7877 - acc: 0.5929 - val_loss: 1.7737 - val_acc: 0.6006\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 19/30\n",
"62/62 - 0s - loss: 1.7562 - acc: 0.5984 - val_loss: 1.7304 - val_acc: 0.6212\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 20/30\n",
"62/62 - 0s - loss: 1.7235 - acc: 0.6043 - val_loss: 1.7242 - val_acc: 0.6156\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 21/30\n",
"62/62 - 0s - loss: 1.6954 - acc: 0.6149 - val_loss: 1.7041 - val_acc: 0.6263\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 22/30\n",
"62/62 - 0s - loss: 1.6949 - acc: 0.6165 - val_loss: 1.7357 - val_acc: 0.6227\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 23/30\n",
"62/62 - 0s - loss: 1.6688 - acc: 0.6208 - val_loss: 1.6868 - val_acc: 0.6354\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 24/30\n",
"62/62 - 0s - loss: 1.6374 - acc: 0.6268 - val_loss: 1.6755 - val_acc: 0.6275\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 25/30\n",
"62/62 - 0s - loss: 1.6202 - acc: 0.6383 - val_loss: 1.6566 - val_acc: 0.6393\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 26/30\n",
"62/62 - 0s - loss: 1.5944 - acc: 0.6424 - val_loss: 1.6365 - val_acc: 0.6441\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 27/30\n",
"62/62 - 0s - loss: 1.5963 - acc: 0.6435 - val_loss: 1.6578 - val_acc: 0.6334\n",
"Epoch 28/30\n",
"62/62 - 0s - loss: 1.5958 - acc: 0.6412 - val_loss: 1.6364 - val_acc: 0.6357\n",
"Epoch 29/30\n",
"62/62 - 0s - loss: 1.5655 - acc: 0.6501 - val_loss: 1.6174 - val_acc: 0.6510\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Epoch 30/30\n",
"62/62 - 0s - loss: 1.5553 - acc: 0.6498 - val_loss: 1.6273 - val_acc: 0.6410\n",
"INFO:tensorflow:Assets written to: training_1/cp.ckpt/assets\n",
"Evaluate on test data\n",
"CPU times: user 40.6 s, sys: 3.77 s, total: 44.3 s\n",
"Wall time: 36.3 s\n"
]
}
],
"source": [
"%%time\n",
"if 'model' not in locals():\n",
"    tf.keras.backend.clear_session()\n",
"    model, history = train(np.array(X_train), np.array(y_train), np.array(X_test), np.array(y_test))\n",
"else:\n",
"    print(\"Loaded weights...\")\n",
"    model.load_weights(checkpoint_path)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "adb16aa9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(14, 75)"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X_test[0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0f26ada5",
"metadata": {},
"outputs": [],
"source": [
"def plot_keras_history(history, name='', acc='acc'):\n",
"    \"\"\"Plots keras history.\"\"\"\n",
"    import matplotlib.pyplot as plt\n",
"\n",
"    training_acc = history.history[acc]\n",
"    validation_acc = history.history['val_' + acc]\n",
"    loss = history.history['loss']\n",
"    val_loss = history.history['val_loss']\n",
"\n",
"    epochs = range(len(training_acc))\n",
"\n",
"    plt.ylim(0, 1)\n",
"    plt.plot(epochs, training_acc, 'tab:blue', label='Training acc')\n",
"    plt.plot(epochs, validation_acc, 'tab:orange', label='Validation acc')\n",
"    plt.title('Training and validation accuracy ' + name)\n",
"    plt.legend()\n",
"\n",
"    plt.figure()\n",
"\n",
"    plt.plot(epochs, loss, 'tab:green', label='Training loss')\n",
"    plt.plot(epochs, val_loss, 'tab:red', label='Validation loss')\n",
"    plt.title('Training and validation loss ' + name)\n",
"    plt.legend()\n",
"    plt.show()\n",
"    plt.close()\n",
"\n",
"plot_keras_history(history)"
]
},
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1d32900e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,2 +0,0 @@
model_checkpoint_path: "goat.weights"
all_model_checkpoint_paths: "goat.weights"
@ -1,2 +0,0 @@
|
|||
model_checkpoint_path: "goat.weights"
|
||||
all_model_checkpoint_paths: "goat.weights"
|