Google TensorFlow Course Programming Notes (6): Logistic Regression
阿新 · Published: 2019-01-03
Logically speaking, I was fairly busy a while back, but lately I've had more free time, so it's the perfect moment to "regress" to studying TensorFlow, haha.
Let's get started:
Step 1: Setup: load the necessary libraries + load the data + preprocess the data
"""和之前一樣新增必要的庫和工具""" from __future__ import print_function import math from IPython import display from matplotlib import cm from matplotlib import gridspec from matplotlib import pyplot as plt import numpy as np import pandas as pd from sklearn import metrics import tensorflow as tf from tensorflow.python.data import Dataset tf.logging.set_verbosity(tf.logging.ERROR) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",") """對特徵資料進行預處理""" def preprocess_features(california_housing_dataframe): """Prepares input features from California housing data set. Args: california_housing_dataframe: A Pandas DataFrame expected to contain data from the California housing data set. Returns: A DataFrame that contains the features to be used for the model, including synthetic features. """ selected_features = california_housing_dataframe[ ["latitude", "longitude", "housing_median_age", "total_rooms", "total_bedrooms", "population", "households", "median_income"]] processed_features = selected_features.copy() # Create a synthetic feature. processed_features["rooms_per_person"] = ( california_housing_dataframe["total_rooms"] / california_housing_dataframe["population"]) return processed_features """對目標資料進行預處理""" def preprocess_targets(california_housing_dataframe): """Prepares target features (i.e., labels) from California housing data set. Args: california_housing_dataframe: A Pandas DataFrame expected to contain data from the California housing data set. Returns: A DataFrame that contains the target feature. """ output_targets = pd.DataFrame() # Create a boolean categorical feature representing whether the # medianHouseValue is above a set threshold. """這裡使用布林分類,目標不再是具體的價格,而是房價是否過高""" output_targets["median_house_value_is_high"] = ( california_housing_dataframe["median_house_value"] > 265000).astype(float) return output_targets california_housing_dataframe = california_housing_dataframe.reindex( np.random.permutation(california_housing_dataframe.index))t)的功能是什麼 """對資料進行預覽和檢查""" # Choose the first 12000 (out of 17000) examples for training. training_examples = preprocess_features(california_housing_dataframe.head(12000)) training_targets = preprocess_targets(california_housing_dataframe.head(12000)) # Choose the last 5000 (out of 17000) examples for validation. validation_examples = preprocess_features(california_housing_dataframe.tail(5000)) validation_targets = preprocess_targets(california_housing_dataframe.tail(5000)) # Double-check that we've done the right thing. print("Training examples summary:") display.display(training_examples.describe()) print("Validation examples summary:") display.display(validation_examples.describe()) print("Training targets summary:") display.display(training_targets.describe()) print("Validation targets summary:") display.display(validation_targets.describe())
Step 2: Construct the feature columns and the input function
"""把輸入的特徵轉化成統一的特徵列格式,方便統一運算和和使用""" def construct_feature_columns(input_features): """Construct the TensorFlow Feature Columns. Args: input_features: The names of the numerical input features to use. Returns: A set of feature columns """ return set([tf.feature_column.numeric_column(my_feature) for my_feature in input_features]) """定義輸入方程""" def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None): """Trains a linear regression model of one feature. Args: features: pandas DataFrame of features targets: pandas DataFrame of targets batch_size: Size of batches to be passed to the model shuffle: True or False. Whether to shuffle the data. num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely Returns: Tuple of (features, labels) for next data batch """ # Convert pandas data into a dict of np arrays. features = {key:np.array(value) for key,value in dict(features).items()} # Construct a dataset, and configure batching/repeating ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit ds = ds.batch(batch_size).repeat(num_epochs) # Shuffle the data, if specified if shuffle: ds = ds.shuffle(10000) # Return the next batch of data features, labels = ds.make_one_shot_iterator().get_next() return features, labels
Bad example, step 3: first try a linear regression model and see what happens
def train_linear_regressor_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):

  periods = 10
  steps_per_period = steps / periods

  # Create a linear regressor object.
  """Here we choose a linear regression model (the wrong tool for probabilities, as we will see)"""
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )

  # Create input functions.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value_is_high"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value_is_high"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value_is_high"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])

    validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])

    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  return linear_regressor
Bad example, step 4: tune the training hyperparameters and output the RMSE
linear_regressor = train_linear_regressor_model(
    learning_rate=0.000001,
    steps=200,
    batch_size=20,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
These results look decent at first glance. But the loss here is the squared error used by LinearRegressor, and that does a poor job when the outputs are interpreted as probabilities: for example, the difference between predicting 0.9 and predicting 0.99999 barely registers in the loss.
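To make the 0.9 vs. 0.99999 point concrete, here is a minimal NumPy sketch (my own illustration, assuming the true label is 0.0) comparing how squared error and log loss score those two predictions:

import numpy as np

y_true = 0.0  # true label: not a high-value house
for y_pred in (0.9, 0.99999):
  squared_error = (y_true - y_pred) ** 2
  log_loss = -np.log(1.0 - y_pred)  # the -y*log(y') term vanishes when y = 0
  print("y'=%.5f  squared error=%.5f  log loss=%.5f" % (y_pred, squared_error, log_loss))

# Squared error: 0.81000 vs 0.99998 -- barely any difference.
# Log loss:      2.30259 vs 11.51293 -- the confidently-wrong prediction is
# penalized far harder, which is what we want for probability outputs.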
About LogLoss:
So we are better off using LogLoss. Since y' is the model's predicted value for the feature set x, we first need to compute the predictions.
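For reference, the LogLoss used in this exercise (as defined in the course material) is, with y the true 0/1 label and y' the predicted probability:

$$\text{LogLoss} = \sum_{(x, y) \in D} -y \log(y') - (1 - y) \log(1 - y')$$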
Step 3: Compute the predicted values y'
"""計算預測值"""
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["median_house_value_is_high"],
num_epochs=1,
shuffle=False)
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
_ = plt.hist(validation_predictions)
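The histogram makes the problem visible: a linear regressor's raw outputs are not constrained to [0, 1], so some of these "probabilities" may be negative or exceed 1. A quick check on the array computed above:

# Raw linear-regression outputs can escape the [0, 1] probability range.
print("min prediction: %0.3f, max prediction: %0.3f" % (
    validation_predictions.min(), validation_predictions.max()))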
Step 4: Build the logistic regression model
def train_linear_classifier_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):

  periods = 10
  steps_per_period = steps / periods

  # Create a linear classifier object.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_classifier = tf.estimator.LinearClassifier(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )

  # Create input functions.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value_is_high"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value_is_high"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value_is_high"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("LogLoss (on training data):")
  training_log_losses = []
  validation_log_losses = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    linear_classifier.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn)
    training_probabilities = np.array([item['probabilities'] for item in training_probabilities])

    validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn)
    validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities])

    training_log_loss = metrics.log_loss(training_targets, training_probabilities)
    validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities)
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, training_log_loss))
    # Add the loss metrics from this period to our list.
    training_log_losses.append(training_log_loss)
    validation_log_losses.append(validation_log_loss)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("LogLoss")
  plt.xlabel("Periods")
  plt.title("LogLoss vs. Periods")
  plt.tight_layout()
  plt.plot(training_log_losses, label="training")
  plt.plot(validation_log_losses, label="validation")
  plt.legend()

  return linear_classifier
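The key difference from the regressor is that LinearClassifier passes the linear output (the logit) through a sigmoid and trains against LogLoss, so its 'probabilities' always land inside (0, 1). A tiny self-contained sketch of that squashing function:

import numpy as np

def sigmoid(z):
  # Maps any real-valued logit into (0, 1), which is what lets the
  # output be read as a probability.
  return 1.0 / (1.0 + np.exp(-z))

print(sigmoid(-2.0), sigmoid(0.0), sigmoid(2.0))  # ~0.119, 0.5, ~0.881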
Step 5: Tune the model hyperparameters and output the LogLoss
"""執行訓練模型"""
linear_classifier = train_linear_classifier_model(
learning_rate=0.000005,
steps=500,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
Step 6: Compute accuracy and plot an ROC curve for the validation set
"""計算準確率"""
evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn)
print("AUC on the validation set: %0.2f" % evaluation_metrics['auc'])
print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy'])
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["median_house_value_is_high"],
num_epochs=1,
shuffle=False)
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
_ = plt.hist(validation_predictions)
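As a quick cross-check (a sketch, reusing the variables above), scikit-learn can compute the AUC directly from the same probabilities, and the result should roughly agree with evaluate():

# Should be close to evaluation_metrics['auc'] printed above.
print("sklearn AUC: %0.2f" % metrics.roc_auc_score(
    validation_targets, validation_probabilities))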
Step 7: Tune the training hyperparameters to improve accuracy
linear_classifier = train_linear_classifier_model(
    learning_rate=0.000003,
    steps=20000,
    batch_size=500,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)

evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn)

print("AUC on the validation set: %0.2f" % evaluation_metrics['auc'])
print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy'])
Done!~
Summary of this exercise: for probability prediction, a logistic regression model is more appropriate than a linear regression model, since it avoids predictions that fall outside the valid range (such as probabilities above 100%).
On the loss-function side, LogLoss is a better fit for probability outputs than the squared-error (L2) loss.
This post is just a record of my personal study notes; please bear with any gaps or mistakes. Read it together with the Google Machine Learning programming exercise on logistic regression: https://colab.research.google.com/notebooks/mlcc/logistic_regression.ipynb?hl=zh-cn