StandardScaler() error while scaling the data

Error in excercise

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from sklearn.metrics import r2_score

admissions_data = pd.read_csv('admissions_data.csv')
labels = admissions_data.iloc[:, -1]
features = admissions_data.iloc[:, 1:8]
features_train, labels_train, features_test, labels_test = train_test_split(features, labels, test_size=0.2, random_state=13)
sc = StandardScaler()
features_train_scaled = sc.fit_transform(features_train)
features_test_scale = sc.transform(features_test)
features_train_scaled = pd.DataFrame(features_train_scaled)
features_test_scale = pd.DataFrame(features_test_scale)

the error is:

Any ideas how to fix that?

Traceback (most recent call last):
  File "script.py", line 26, in <module>
    features_test_scale = sc.transform(features_test)
  File "/usr/local/lib/python3.6/dist-packages/sklearn/preprocessing/_data.py", line 794, in transform
    force_all_finite='allow-nan')
  File "/usr/local/lib/python3.6/dist-packages/sklearn/base.py", line 420, in _validate_data
    X = check_array(X, **check_params)
  File "/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py", line 73, in inner_f
    return f(**kwargs)
  File "/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py", line 624, in check_array
    "if it contains a single sample.".format(array))
ValueError: Expected 2D array, got 1D array instead:
array=[0.57 0.78 0.59 0.64 0.47 0.63 0.65 0.89 0.84 0.73 0.75 0.64 0.46 0.78
 0.62 0.53 0.85 0.67 0.84 0.94 0.64 0.53 0.47 0.86 0.62 0.7  0.77 0.61
 0.61 0.63 0.86 0.82 0.65 0.58 0.7  0.7  0.84 0.72 0.71 0.77 0.69 0.8
 0.52 0.62 0.79 0.71 0.9  0.84 0.6  0.86 0.67 0.61 0.71 0.52 0.62 0.37
 0.73 0.64 0.71 0.8  0.88 0.78 0.45 0.62 0.62 0.86 0.74 0.94 0.58 0.7
 0.92 0.64 0.65 0.83 0.34 0.66 0.67 0.7  0.71 0.54 0.68 0.61 0.68 0.79
 0.57 0.94 0.59 0.79 0.73 0.91 0.86 0.95 0.9  0.92 0.68 0.84 0.69 0.72
 0.94 0.53 0.45 0.77 0.77 0.91 0.61 0.78 0.77 0.82 0.9  0.92 0.54 0.92
 0.72 0.5  0.68 0.78 0.72 0.53 0.79 0.49 0.68 0.72 0.73 0.93 0.72 0.52
 0.54 0.86 0.65 0.93 0.89 0.72 0.34 0.64 0.96 0.79 0.73 0.49 0.73 0.94
 0.7  0.95 0.65 0.86 0.78 0.75 0.89 0.94 0.91 0.87 0.93 0.81 0.94 0.89
 0.57 0.77 0.39 0.46 0.78 0.64 0.76 0.58 0.56 0.53 0.79 0.9  0.92 0.96
 0.67 0.65 0.64 0.58 0.94 0.76 0.78 0.88 0.84 0.68 0.66 0.42 0.56 0.66
 0.46 0.65 0.58 0.72 0.48 0.68 0.89 0.95 0.46 0.71 0.79 0.52 0.57 0.76
 0.52 0.8  0.77 0.91 0.75 0.49 0.72 0.72 0.61 0.97 0.8  0.85 0.73 0.64
 0.87 0.63 0.97 0.72 0.82 0.54 0.71 0.45 0.8  0.49 0.77 0.93 0.89 0.93
 0.81 0.62 0.81 0.66 0.78 0.76 0.48 0.61 0.82 0.68 0.7  0.68 0.62 0.81
 0.87 0.94 0.38 0.67 0.64 0.84 0.62 0.7  0.62 0.5  0.79 0.78 0.36 0.77
 0.57 0.87 0.74 0.71 0.61 0.57 0.64 0.73 0.81 0.74 0.8  0.69 0.66 0.64
 0.93 0.64 0.59 0.71 0.82 0.69 0.69 0.89 0.93 0.74 0.64 0.84 0.91 0.97
 0.55 0.74 0.72 0.71 0.93 0.96 0.8  0.8  0.81 0.88 0.64 0.38 0.87 0.73
 0.78 0.89 0.56 0.61 0.76 0.46 0.78 0.71 0.81 0.59 0.47 0.7  0.42 0.76
 0.8  0.67 0.94 0.65 0.51 0.73 0.9  0.8  0.65 0.7  0.96 0.96 0.73 0.79
 0.86 0.89 0.85 0.76 0.76 0.71 0.83 0.76 0.42 0.9  0.58 0.66 0.86 0.71
 0.8  0.51 0.65 0.58 0.76 0.8  0.7  0.61 0.71 0.69 0.95 0.72 0.79 0.97
 0.74 0.96 0.47 0.56 0.73 0.94 0.76 0.79 0.71 0.58 0.94 0.66 0.75 0.76
 0.84 0.59 0.68 0.75 0.76 0.72 0.87 0.78 0.67 0.79 0.91 0.57 0.77 0.69
 0.73 0.43 0.93 0.68 0.82 0.67 0.74 0.82 0.85 0.62 0.54 0.71 0.92 0.85
 0.79 0.63 0.59 0.73 0.66 0.74 0.9  0.81].
Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.

This topic was automatically closed 41 days after the last reply. New replies are no longer allowed.