{ "cells": [ { "cell_type": "markdown", "source": [ "# Quickstart Tutorial\n", "\n", "In this short introduction to the Masterful AutoML Platform, you will train a model from start to finish with Masterful. \n", "\n", "First, import the necessary libraries.\n", "\n" ], "metadata": {} }, { "cell_type": "code", "execution_count": 1, "source": [ "import tensorflow as tf\n", "import tensorflow_datasets as tfds\n", "\n", "import sys\n", "sys.path.append('../../../')\n", "\n", "import masterful" ], "outputs": [], "metadata": {} }, { "cell_type": "markdown", "source": [ "Use TensorFlow Datasets to load CIFAR-10. " ], "metadata": {} }, { "cell_type": "code", "execution_count": 2, "source": [ "TRAINPER=5\n", "\n", "(train, test) = tfds.load('cifar10', as_supervised=True, batch_size=32, split=[f'train[:{TRAINPER}%]', 'test[:10%]'],with_info=False)" ], "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "2021-09-14 18:45:50.621668: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" ] } ], "metadata": {} }, { "cell_type": "markdown", "source": [ "Masterful works best with dense labels, so apply one-hot encoding to your sparse labels." ], "metadata": {} }, { "cell_type": "code", "execution_count": 3, "source": [ "def one_hot(image, label):\n", " return image, tf.one_hot(label, 10)\n", "\n", "train = train.map(one_hot)\n", "test = test.map(one_hot)" ], "outputs": [], "metadata": {} }, { "cell_type": "markdown", "source": [ "Build a simple model to classify CIFAR-10. 
" ], "metadata": {} }, { "cell_type": "code", "execution_count": 4, "source": [ "def get_model():\n", " model = tf.keras.models.Sequential()\n", " model.add(tf.keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(32, 32, 3)))\n", " model.add(tf.keras.layers.Conv2D(16, (3, 3), activation='relu',))\n", " model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n", " model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',))\n", " model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n", " model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu',))\n", " model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n", " model.add(tf.keras.layers.GlobalAveragePooling2D())\n", " model.add(tf.keras.layers.Dense(10, activation='softmax'))\n", "\n", " return model\n", "\n", "model = get_model()" ], "outputs": [], "metadata": {} }, { "cell_type": "markdown", "source": [ "Train the model to completion. \n" ], "metadata": {} }, { "cell_type": "code", "execution_count": 5, "source": [ "model.compile(optimizer='adam',\n", " loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),\n", " metrics=['accuracy'])\n", " \n", "early_stopping = tf.keras.callbacks.EarlyStopping(patience=10, verbose=1)\n", "reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(patience=4, verbose=1)\n", "\n", "model.fit(train, validation_data=test, callbacks=[early_stopping, reduce_lr], epochs=tf.int32.max, verbose=2)" ], "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "2021-09-14 18:45:51.460624: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Epoch 1/2147483647\n", "79/79 - 3s - loss: 2.2747 - accuracy: 0.1304 - val_loss: 2.2188 - val_accuracy: 0.1630\n", "Epoch 2/2147483647\n", "79/79 - 1s - loss: 2.0878 - accuracy: 0.2176 - val_loss: 1.9740 - val_accuracy: 0.2360\n", "Epoch 3/2147483647\n", "79/79 - 1s - loss: 1.9572 - accuracy: 
0.2708 - val_loss: 1.8702 - val_accuracy: 0.2750\n", "Epoch 4/2147483647\n", "79/79 - 1s - loss: 1.8666 - accuracy: 0.3068 - val_loss: 1.8110 - val_accuracy: 0.2860\n", "Epoch 5/2147483647\n", "79/79 - 1s - loss: 1.7961 - accuracy: 0.3400 - val_loss: 1.7728 - val_accuracy: 0.3110\n", "Epoch 6/2147483647\n", "79/79 - 1s - loss: 1.7362 - accuracy: 0.3596 - val_loss: 1.7350 - val_accuracy: 0.3370\n", "Epoch 7/2147483647\n", "79/79 - 1s - loss: 1.6865 - accuracy: 0.3808 - val_loss: 1.7020 - val_accuracy: 0.3620\n", "Epoch 8/2147483647\n", "79/79 - 1s - loss: 1.6432 - accuracy: 0.4080 - val_loss: 1.6791 - val_accuracy: 0.3730\n", "Epoch 9/2147483647\n", "79/79 - 1s - loss: 1.6032 - accuracy: 0.4284 - val_loss: 1.6526 - val_accuracy: 0.3800\n", "Epoch 10/2147483647\n", "79/79 - 1s - loss: 1.5683 - accuracy: 0.4464 - val_loss: 1.6310 - val_accuracy: 0.3920\n", "Epoch 11/2147483647\n", "79/79 - 1s - loss: 1.5354 - accuracy: 0.4568 - val_loss: 1.6151 - val_accuracy: 0.4010\n", "Epoch 12/2147483647\n", "79/79 - 1s - loss: 1.5049 - accuracy: 0.4632 - val_loss: 1.5887 - val_accuracy: 0.4210\n", "Epoch 13/2147483647\n", "79/79 - 1s - loss: 1.4763 - accuracy: 0.4724 - val_loss: 1.5797 - val_accuracy: 0.4200\n", "Epoch 14/2147483647\n", "79/79 - 1s - loss: 1.4485 - accuracy: 0.4764 - val_loss: 1.5675 - val_accuracy: 0.4170\n", "Epoch 15/2147483647\n", "79/79 - 1s - loss: 1.4224 - accuracy: 0.4892 - val_loss: 1.5543 - val_accuracy: 0.4230\n", "Epoch 16/2147483647\n", "79/79 - 1s - loss: 1.3973 - accuracy: 0.4984 - val_loss: 1.5448 - val_accuracy: 0.4300\n", "Epoch 17/2147483647\n", "79/79 - 1s - loss: 1.3735 - accuracy: 0.5064 - val_loss: 1.5414 - val_accuracy: 0.4320\n", "Epoch 18/2147483647\n", "79/79 - 1s - loss: 1.3503 - accuracy: 0.5148 - val_loss: 1.5421 - val_accuracy: 0.4400\n", "Epoch 19/2147483647\n", "79/79 - 1s - loss: 1.3279 - accuracy: 0.5268 - val_loss: 1.5367 - val_accuracy: 0.4460\n", "Epoch 20/2147483647\n", "79/79 - 1s - loss: 1.3054 - accuracy: 0.5400 - 
val_loss: 1.5309 - val_accuracy: 0.4430\n", "Epoch 21/2147483647\n", "79/79 - 1s - loss: 1.2828 - accuracy: 0.5464 - val_loss: 1.5276 - val_accuracy: 0.4400\n", "Epoch 22/2147483647\n", "79/79 - 1s - loss: 1.2613 - accuracy: 0.5516 - val_loss: 1.5298 - val_accuracy: 0.4380\n", "Epoch 23/2147483647\n", "79/79 - 1s - loss: 1.2421 - accuracy: 0.5620 - val_loss: 1.5295 - val_accuracy: 0.4440\n", "Epoch 24/2147483647\n", "79/79 - 1s - loss: 1.2234 - accuracy: 0.5700 - val_loss: 1.5303 - val_accuracy: 0.4400\n", "Epoch 25/2147483647\n", "79/79 - 1s - loss: 1.2044 - accuracy: 0.5832 - val_loss: 1.5313 - val_accuracy: 0.4480\n", "\n", "Epoch 00025: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\n", "Epoch 26/2147483647\n", "79/79 - 1s - loss: 1.1333 - accuracy: 0.6120 - val_loss: 1.4211 - val_accuracy: 0.4940\n", "Epoch 27/2147483647\n", "79/79 - 1s - loss: 1.1132 - accuracy: 0.6256 - val_loss: 1.4189 - val_accuracy: 0.4910\n", "Epoch 28/2147483647\n", "79/79 - 1s - loss: 1.1075 - accuracy: 0.6244 - val_loss: 1.4190 - val_accuracy: 0.4910\n", "Epoch 29/2147483647\n", "79/79 - 1s - loss: 1.1029 - accuracy: 0.6252 - val_loss: 1.4195 - val_accuracy: 0.4920\n", "Epoch 30/2147483647\n", "79/79 - 1s - loss: 1.0990 - accuracy: 0.6256 - val_loss: 1.4197 - val_accuracy: 0.4910\n", "Epoch 31/2147483647\n", "79/79 - 1s - loss: 1.0954 - accuracy: 0.6264 - val_loss: 1.4198 - val_accuracy: 0.4920\n", "\n", "Epoch 00031: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\n", "Epoch 32/2147483647\n", "79/79 - 1s - loss: 1.0900 - accuracy: 0.6296 - val_loss: 1.4106 - val_accuracy: 0.4980\n", "Epoch 33/2147483647\n", "79/79 - 1s - loss: 1.0827 - accuracy: 0.6308 - val_loss: 1.4100 - val_accuracy: 0.5020\n", "Epoch 34/2147483647\n", "79/79 - 1s - loss: 1.0817 - accuracy: 0.6340 - val_loss: 1.4100 - val_accuracy: 0.5040\n", "Epoch 35/2147483647\n", "79/79 - 1s - loss: 1.0811 - accuracy: 0.6344 - val_loss: 1.4101 - val_accuracy: 0.5040\n", "Epoch 
36/2147483647\n", "79/79 - 1s - loss: 1.0807 - accuracy: 0.6352 - val_loss: 1.4101 - val_accuracy: 0.5040\n", "Epoch 37/2147483647\n", "79/79 - 1s - loss: 1.0803 - accuracy: 0.6352 - val_loss: 1.4101 - val_accuracy: 0.5030\n", "\n", "Epoch 00037: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.\n", "Epoch 38/2147483647\n", "79/79 - 1s - loss: 1.0785 - accuracy: 0.6340 - val_loss: 1.4100 - val_accuracy: 0.5030\n", "Epoch 39/2147483647\n", "79/79 - 1s - loss: 1.0784 - accuracy: 0.6344 - val_loss: 1.4099 - val_accuracy: 0.5030\n", "Epoch 40/2147483647\n", "79/79 - 1s - loss: 1.0783 - accuracy: 0.6344 - val_loss: 1.4099 - val_accuracy: 0.5030\n", "Epoch 41/2147483647\n", "79/79 - 1s - loss: 1.0782 - accuracy: 0.6344 - val_loss: 1.4098 - val_accuracy: 0.5030\n", "Epoch 42/2147483647\n", "79/79 - 1s - loss: 1.0782 - accuracy: 0.6356 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 43/2147483647\n", "79/79 - 1s - loss: 1.0781 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 44/2147483647\n", "79/79 - 1s - loss: 1.0781 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 45/2147483647\n", "79/79 - 1s - loss: 1.0780 - accuracy: 0.6356 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "\n", "Epoch 00045: ReduceLROnPlateau reducing learning rate to 1.0000001111620805e-07.\n", "Epoch 46/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 47/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 48/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 49/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "\n", "Epoch 00049: ReduceLROnPlateau reducing learning rate to 1.000000082740371e-08.\n", "Epoch 50/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - 
val_accuracy: 0.5020\n", "Epoch 51/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 52/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "Epoch 53/2147483647\n", "79/79 - 1s - loss: 1.0778 - accuracy: 0.6348 - val_loss: 1.4098 - val_accuracy: 0.5020\n", "\n", "Epoch 00053: ReduceLROnPlateau reducing learning rate to 1.000000082740371e-09.\n", "Epoch 00053: early stopping\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "" ] }, "metadata": {}, "execution_count": 5 } ], "metadata": { "tags": [] } }, { "cell_type": "markdown", "source": [ "Evaluate the model to see how well it did without Masterful. You should get a model that is around 50% accurate. In practice, you would need to run multiple experiments and perform a t-test to get a statistically significant result. " ], "metadata": {} }, { "cell_type": "code", "execution_count": 6, "source": [ "model.evaluate(test)" ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "32/32 [==============================] - 0s 3ms/step - loss: 1.4098 - accuracy: 0.5020\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "[1.4098398685455322, 0.5019999742507935]" ] }, "metadata": {}, "execution_count": 6 } ], "metadata": {} }, { "cell_type": "markdown", "source": [ "Now train with Masterful. First, specify some design choices about your model by using `masterful.spec.create_model_and_data_specs`. Next, call `masterful.autofit`, which runs Masterful's metalearning algorithm and then applies it. 
" ], "metadata": {} }, { "cell_type": "code", "execution_count": 7, "source": [ "del model\n", "model = get_model()\n", "\n", "model_spec, data_spec = masterful.spec.create_model_and_data_specs(\n", " model=model, \n", " dataset=train.unbatch(), \n", " task=masterful.spec.Task.CLASSIFICATION, \n", " image_range=masterful.spec.ImageRange.ZERO_255, \n", " num_classes=10, \n", " sparse=False, \n", " from_logits=False)\n", "\n", "masterful.autofit(model, model_spec, train.unbatch(), data_spec, validation_data=test.unbatch())\n" ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "WARNING:tensorflow:From ../../../masterful/utils/find_batchsize.py:40: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use `tf.config.list_physical_devices('GPU')` instead.\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "WARNING:tensorflow:From ../../../masterful/utils/find_batchsize.py:40: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use `tf.config.list_physical_devices('GPU')` instead.\n", "../../../masterful/utils/find_batchsize.py:47: UserWarning: WARNING: No GPU available. Using fixed batch size of 32\n", " warnings.warn(\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "500/500 [==============================] - 6s 10ms/step - loss: 2.2633\n", "The logs are being saved to /Users/yaosh/.masterful/UTC_2021-09-14__22-46-49.945599/log-analyze-UTC_2021-09-14__22-46-49.945599.txt\n", "The logs are being saved to /Users/yaosh/.masterful/UTC_2021-09-14__22-46-49.945599/log-analyze-UTC_2021-09-14__22-46-49.945599.txt\n", "Masterful backend.find_augmentation_policy() STARTING... \n", "Baseline analysis STARTING... Args: {'x': , 'epochs': 2147483647, 'validation_data': , 'callbacks': [, ]}\n", "Masterful backend.analyze() DONE. 
\n", "A file with name: interim-bridge-analyze.pstpb was found while override was set to False.\n", "500/500 [==============================] - 7s 11ms/step - loss: 0.9418\n", "The logs are being saved to /Users/yaosh/.masterful/UTC_2021-09-14__23-25-06.931697/log-fit-UTC_2021-09-14__23-25-06.931697.txt\n", "The logs are being saved to /Users/yaosh/.masterful/UTC_2021-09-14__23-25-06.931697/log-fit-UTC_2021-09-14__23-25-06.931697.txt\n", "Masterful backend.fit() STARTING...\n", "Loaded config from /Users/yaosh/.masterful/policies/interim-bridge-fit\n", "Masterful backend.fit with policy /Users/yaosh/.masterful/policies/interim-bridge-fit phase 1 of 2 STARTING...\n", "Epoch 1/2147483647\n", "79/79 [==============================] - 19s 51ms/step - loss: 1.5700 - val_loss: 1.4144\n", "Epoch 2/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5917 - val_loss: 1.3346\n", "Epoch 3/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5551 - val_loss: 1.3347\n", "Epoch 4/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5999 - val_loss: 1.3054\n", "Epoch 5/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5796 - val_loss: 1.2822\n", "Epoch 6/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5731 - val_loss: 1.3102\n", "Epoch 7/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5587 - val_loss: 1.3041\n", "Epoch 8/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5436 - val_loss: 1.3193\n", "Epoch 9/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5609 - val_loss: 1.3037\n", "Epoch 10/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5560 - val_loss: 1.2685\n", "Epoch 11/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5782 - val_loss: 1.3367\n", "Epoch 12/2147483647\n", "79/79 
[==============================] - 4s 49ms/step - loss: 1.5487 - val_loss: 1.2593\n", "Epoch 13/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5633 - val_loss: 1.3003\n", "Epoch 14/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5642 - val_loss: 1.3707\n", "Epoch 15/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.5522 - val_loss: 1.3814\n", "Epoch 16/2147483647\n", "79/79 [==============================] - 4s 52ms/step - loss: 1.5609 - val_loss: 1.3255\n", "Epoch 17/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.5218 - val_loss: 1.2996\n", "Epoch 18/2147483647\n", "79/79 [==============================] - 4s 50ms/step - loss: 1.5129 - val_loss: 1.2794\n", "Epoch 19/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4661 - val_loss: 1.2664\n", "Epoch 20/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4742 - val_loss: 1.2593\n", "Epoch 21/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4712 - val_loss: 1.2543\n", "Epoch 22/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4860 - val_loss: 1.2446\n", "Epoch 23/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4615 - val_loss: 1.2713\n", "Epoch 24/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4886 - val_loss: 1.2341\n", "Epoch 25/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4912 - val_loss: 1.2808\n", "Epoch 26/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4933 - val_loss: 1.2643\n", "Epoch 27/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4550 - val_loss: 1.2435\n", "Epoch 28/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4859 - val_loss: 1.2653\n", "Epoch 29/2147483647\n", 
"79/79 [==============================] - 4s 48ms/step - loss: 1.4823 - val_loss: 1.2459\n", "Epoch 30/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4740 - val_loss: 1.2510\n", "Epoch 31/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4524 - val_loss: 1.2561\n", "Epoch 32/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.4516 - val_loss: 1.2597\n", "Epoch 33/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4719 - val_loss: 1.2360\n", "Epoch 34/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4542 - val_loss: 1.2570\n", "Epoch 35/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4308 - val_loss: 1.2507\n", "Epoch 36/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4596 - val_loss: 1.2436\n", "Epoch 37/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4558 - val_loss: 1.2471\n", "Epoch 38/2147483647\n", "79/79 [==============================] - 4s 49ms/step - loss: 1.4581 - val_loss: 1.2481\n", "Epoch 39/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4448 - val_loss: 1.2445\n", "Epoch 40/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4449 - val_loss: 1.2450\n", "Epoch 41/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4487 - val_loss: 1.2459\n", "Epoch 42/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4421 - val_loss: 1.2477\n", "Epoch 43/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4520 - val_loss: 1.2484\n", "Epoch 44/2147483647\n", "79/79 [==============================] - 4s 48ms/step - loss: 1.4518 - val_loss: 1.2481\n", "Masterful Restore LR: Restoring lr to 0.0012499999720603228.\n", "Fitting model phase 1 of 2 DONE.\n", "Masterful backend.fit with 
policy /Users/yaosh/.masterful/policies/interim-bridge-fit phase 2 of 2 STARTING...\n", "Epoch 45/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.4773 - val_loss: 1.8307\n", "Epoch 46/2147483691\n", "79/79 [==============================] - 4s 52ms/step - loss: 2.2624 - val_loss: 1.8694\n", "Epoch 47/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2504 - val_loss: 1.8442\n", "Epoch 48/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2481 - val_loss: 1.8308\n", "Epoch 49/2147483691\n", "79/79 [==============================] - 4s 52ms/step - loss: 2.2466 - val_loss: 1.8446\n", "Epoch 50/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2454 - val_loss: 1.8291\n", "Epoch 51/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2443 - val_loss: 1.8542\n", "Epoch 52/2147483691\n", "79/79 [==============================] - 4s 50ms/step - loss: 2.2436 - val_loss: 1.8459\n", "Epoch 53/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2446 - val_loss: 1.8340\n", "Epoch 54/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2428 - val_loss: 1.8411\n", "Epoch 55/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2436 - val_loss: 1.8519\n", "Epoch 56/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2414 - val_loss: 1.8213\n", "Epoch 57/2147483691\n", "79/79 [==============================] - 4s 50ms/step - loss: 2.2405 - val_loss: 1.7987\n", "Epoch 58/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2405 - val_loss: 1.8074\n", "Epoch 59/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2400 - val_loss: 1.8353\n", "Epoch 60/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2405 - val_loss: 1.8140\n", "Epoch 
61/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2405 - val_loss: 1.8092\n", "Epoch 62/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2409 - val_loss: 1.8203\n", "Epoch 63/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2387 - val_loss: 1.8120\n", "Epoch 64/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2399 - val_loss: 1.8119\n", "Epoch 65/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2398 - val_loss: 1.8108\n", "Epoch 66/2147483691\n", "79/79 [==============================] - 4s 52ms/step - loss: 2.2398 - val_loss: 1.8127\n", "Epoch 67/2147483691\n", "79/79 [==============================] - 4s 50ms/step - loss: 2.2401 - val_loss: 1.8033\n", "Epoch 68/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2391 - val_loss: 1.8117\n", "Epoch 69/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2395 - val_loss: 1.8116\n", "Epoch 70/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2392 - val_loss: 1.8134\n", "Epoch 71/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2389 - val_loss: 1.8125\n", "Epoch 72/2147483691\n", "79/79 [==============================] - 4s 50ms/step - loss: 2.2400 - val_loss: 1.8129\n", "Epoch 73/2147483691\n", "79/79 [==============================] - 4s 52ms/step - loss: 2.2394 - val_loss: 1.8126\n", "Epoch 74/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2391 - val_loss: 1.8115\n", "Epoch 75/2147483691\n", "79/79 [==============================] - 4s 50ms/step - loss: 2.2393 - val_loss: 1.8116\n", "Epoch 76/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2398 - val_loss: 1.8111\n", "Epoch 77/2147483691\n", "79/79 [==============================] - 4s 51ms/step - loss: 2.2395 - val_loss: 1.8106\n", 
"Masterful Restore LR: Restoring lr to 0.0012499999720603228.\n", "Masterful backend.fit phase 2 of 2 DONE.\n", "Masterful backend.fit() DONE.\n", "32/32 [==============================] - 0s 3ms/step - loss: 1.2481 - categorical_accuracy: 0.5580\n" ] }, { "output_type": "execute_result", "data": { }, "metadata": {}, "execution_count": 7 } ], "metadata": { "tags": [ "outputPrepend" ] } }, { "cell_type": "markdown", "source": [ "Evaluate the model trained with Masterful. Your model should now be about 55% accurate. " ], "metadata": {} }, { "cell_type": "code", "execution_count": 8, "source": [ "model.evaluate(test)" ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "32/32 [==============================] - 0s 3ms/step - loss: 1.2481 - categorical_accuracy: 0.5580\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "[1.2481117248535156, 0.5580000281333923]" ] }, "metadata": {}, "execution_count": 8 } ], "metadata": {} } ], "metadata": { "interpreter": { "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" }, "kernelspec": { "name": "python3", "display_name": "Python 3.8.9 64-bit" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.9" } }, "nbformat": 4, "nbformat_minor": 2 }