I'm working on this code, but I'm getting an error and I don't know why.
import tensorflow as tf
from tensorflow import keras
import numpy as np
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
from keras.datasets import fashion_mnist
from sklearn.model_selection import train_test_split
from tensorflow.python.keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization
from keras.layers import LeakyReLU
(train_X,train_Y), (test_X,test_Y) = fashion_mnist.load_data()
print('Training data shape : ', train_X.shape, train_Y.shape)
print('Testing data shape : ', test_X.shape, test_Y.shape)
classes = np.unique(train_Y)
nClasses = len(classes)
print('Total number of outputs : ', nClasses)
print('Output classes : ', classes)
plt.figure(figsize=[5,5])
plt.subplot(121)
plt.imshow(train_X[0,:,:], cmap='gray')
plt.title("Ground Truth : {}".format(train_Y[0]))
plt.subplot(122)
plt.imshow(test_X[0,:,:], cmap='gray')
plt.title("Ground Truth : {}".format(test_Y[0]))
train_X = train_X.reshape(-1, 28,28, 1)
test_X = test_X.reshape(-1, 28,28, 1)
train_X.shape, test_X.shape
train_X = train_X.astype('float32')
test_X = test_X.astype('float32')
train_X = train_X / 255.
test_X = test_X / 255.
from tensorflow.keras.utils import to_categorical
train_Y_one_hot = to_categorical(train_Y)
test_Y_one_hot = to_categorical(test_Y)
train_X,valid_X,train_label,valid_label = train_test_split(train_X, train_Y_one_hot, test_size=0.2, random_state=13)
cnn = tf.keras.models.Sequential()
batch_size = 64
epochs = 20
num_classes = 10
fashion_model = Sequential()
fashion_model.add(Conv2D(32, kernel_size=(3, 3), activation='linear', input_shape=(28,28,1), padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(MaxPooling2D((2, 2), padding='same'))
fashion_model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
fashion_model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
fashion_model.add(Flatten())
fashion_model.add(Dense(128, activation='linear'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(Dense(num_classes, activation='softmax'))
fashion_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])  # Error here
ValueError Traceback (most recent call last)
Cell In [21], line 1
----> 1 fashion_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
2 fashion_model.summary()
File ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py:568, in Model.compile(self, optimizer, loss, metrics, loss_weights, weighted_metrics, run_eagerly, steps_per_execution, **kwargs)
565 self._validate_compile(optimizer, metrics, **kwargs)
566 self._run_eagerly = run_eagerly
---> 568 self.optimizer = self._get_optimizer(optimizer)
569 self.compiled_loss = compile_utils.LossesContainer(
570 loss, loss_weights, output_names=self.output_names)
571 self.compiled_metrics = compile_utils.MetricsContainer(
572 metrics, weighted_metrics, output_names=self.output_names,
573 from_serialized=from_serialized)
File ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py:606, in Model._get_optimizer(self, optimizer)
603 opt = lso.LossScaleOptimizerV1(opt, loss_scale)
604 return opt
---> 606 return nest.map_structure(_get_single_optimizer, optimizer)
File ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\util\nest.py:917, in map_structure(func, *structure, **kwargs)
913 flat_structure = (flatten(s, expand_composites) for s in structure)
914 entries = zip(*flat_structure)
916 return pack_sequence_as(
---> 917 structure[0], [func(*x) for x in entries],
918 expand_composites=expand_composites)
File ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\util\nest.py:917, in <listcomp>(.0)
913 flat_structure = (flatten(s, expand_composites) for s in structure)
914 entries = zip(*flat_structure)
916 return pack_sequence_as(
---> 917 structure[0], [func(*x) for x in entries],
918 expand_composites=expand_composites)
File ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py:597, in Model._get_optimizer.<locals>._get_single_optimizer(opt)
596 def _get_single_optimizer(opt):
---> 597 opt = optimizers.get(opt)
598 if (loss_scale is not None and
599 not isinstance(opt, lso.LossScaleOptimizer)):
600 if loss_scale == 'dynamic':
File ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\optimizers.py:131, in get(identifier)
129 return deserialize(config)
130 else:
---> 131 raise ValueError(
132 'Could not interpret optimizer identifier: {}'.format(identifier))
ValueError: Could not interpret optimizer identifier: <keras.optimizers.optimizer_v2.adam.Adam object at 0x000002384FED0220>
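The last line of the traceback shows that the rejected optimizer is a keras.optimizers.optimizer_v2.adam.Adam object, i.e. it comes from the standalone keras package, while Sequential above is imported from tensorflow.python.keras. The imports mix three different Keras namespaces (keras, tensorflow.keras and tensorflow.python.keras), and compile() does not recognize an optimizer object from a different namespace than the model. Below is a minimal sketch of what I think should work, assuming TensorFlow 2.x with its bundled Keras, where every Keras symbol is imported from tensorflow.keras only:

# Sketch of a possible fix (assumption: TensorFlow 2.x, where Keras ships inside
# TensorFlow). Every Keras symbol comes from tensorflow.keras, so the model,
# layers, loss and optimizer all belong to the same package.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LeakyReLU

fashion_model = Sequential([
    # Conv2D's default activation is linear, matching activation='linear' above.
    Conv2D(32, (3, 3), padding='same', input_shape=(28, 28, 1)),
    LeakyReLU(alpha=0.1),
    MaxPooling2D((2, 2), padding='same'),
    Conv2D(64, (3, 3), padding='same'),
    LeakyReLU(alpha=0.1),
    MaxPooling2D((2, 2), padding='same'),
    Conv2D(128, (3, 3), padding='same'),
    LeakyReLU(alpha=0.1),
    MaxPooling2D((2, 2), padding='same'),
    Flatten(),
    Dense(128),
    LeakyReLU(alpha=0.1),
    Dense(10, activation='softmax'),
])

# The optimizer now comes from tf.keras, the same namespace as the model,
# so compile() can interpret it.
fashion_model.compile(loss='categorical_crossentropy',
                      optimizer=tf.keras.optimizers.Adam(),
                      metrics=['accuracy'])

The same applies to the data utilities: from tensorflow.keras.datasets import fashion_mnist and from tensorflow.keras.utils import to_categorical keep the whole script in one namespace.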