My solution for the 'Deep Learning Regression with Admissions Data' project

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from sklearn.metrics import r2_score

df = pd.read_csv("admissions_data.csv")

#print(df.head())

# split into features (all columns but the last) and the label (last column),
# then one-hot encode any categorical features
features, labels = df.iloc[:, 0:-1], df.iloc[:, -1]
features = pd.get_dummies(features)

features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.2, random_state=23)

# standardize the features, fitting the scaler on the training split only
scaler = StandardScaler()
features_train_scaled = scaler.fit_transform(features_train)
features_test_scaled = scaler.transform(features_test)

def create_model(features, neurons, activation_name, rate, features_train, features_test, labels_train, labels_test, epochs, batch_size):
    # one hidden layer plus a single-unit output for regression,
    # compiled with Adam at a tunable learning rate
    model = Sequential()
    input_layer = layers.InputLayer(input_shape=(features.shape[1], ))
    model.add(input_layer)
    model.add(layers.Dense(neurons, activation=activation_name))
    model.add(layers.Dense(1))
    opt = keras.optimizers.Adam(learning_rate=rate)
    model.compile(loss="mse", metrics=["mae"], optimizer=opt)
    model.fit(features_train, labels_train, epochs=epochs, batch_size=batch_size, verbose=1)
    return model

model = create_model(features, 64, "sigmoid", 0.1, features_train_scaled, features_test_scaled, labels_train, labels_test, 100, 16)

res_mse, res_mae = model.evaluate(features_test_scaled, labels_test, verbose=0)

print(res_mse, res_mae)
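
EarlyStopping and r2_score are imported above but never actually used, so in case it helps anyone, here is a rough sketch of how they could be hooked in. The validation_split, patience, and epoch values are just placeholder choices on my part, not part of the project requirements:

# continue training the model above, stopping once validation loss stops improving
# (patience and validation_split here are arbitrary assumptions)
early_stop = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
model.fit(features_train_scaled, labels_train,
          epochs=100, batch_size=16,
          validation_split=0.2, callbacks=[early_stop], verbose=0)

# R^2 of the predictions on the held-out test set
predictions = model.predict(features_test_scaled)
print("R^2:", r2_score(labels_test, predictions))

restore_best_weights rolls the model back to the epoch with the lowest validation loss, which is usually what you want before evaluating on the test set.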

It might be better to post a link to the GitHub repo for the project, rather than unformatted Python code.
