# Using pandas box_plot to remove outliers
#-*- coding:utf-8 _*-  
""" 
@author:Administrator
@file: standard_process.py
@time: 2018/8/9
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import seaborn as sns
from sklearn.preprocessing import StandardScaler
# (The bare string below notes: confirm outliers via a box_plot / box-and-whisker plot.)
'''
通過box_plot(盒圖來確認)異常值
'''

# Input directory: two directory levels above the current working dir, + '/input/'
input_data_path = os.path.dirname(os.path.dirname(os.getcwd())) + '/input/'
print(input_data_path)

# Paths of the data files
month_6_train_path = input_data_path +'month_6_1.csv'
month_6_test_path = input_data_path + 'test_data_6_1.csv'

# Load the data
data_train = pd.read_csv(month_6_train_path)
data_test = pd.read_csv(month_6_test_path)

# print(data_train.head())
# print(data_test.head())

# Province/city/address columns are ignored for now
# Only one month of data, so the month column is ignored too
# bedrooms has to be treated as categorical data
# Keep only longitude, latitude, price, buildingTypeId, bedrooms, daysOnMarket


# Select those columns;
# train = data_train[['longitude', 'latitude', 'price', 'buildingTypeId', 'bedrooms', 'daysOnMarket']]
# train= train.dropna()
# NOTE(review): `train` is built from data_test here (the data_train line above
# is commented out) — presumably deliberate for this test-data run; confirm.
train = data_test[['longitude', 'latitude', 'price', 'buildingTypeId', 'bedrooms', 'daysOnMarket']]
print(train.head())
# print(test.head())
# print(train.isna().sum())
# sns.pairplot(train)
# # sns.pairplot(test)
# plt.show()


# Feature cleaning: outliers are removed with box plots,
# in two steps: first per-column outlier handling,
# then group-wise outlier handling across multiple columns
def remove_filers_with_boxplot(data):
    """Drop every row that contains a box-plot outlier in any column.

    Computes the whisker bounds (Q1 - 1.5*IQR, Q3 + 1.5*IQR) directly —
    the same points matplotlib's ``boxplot`` would draw as fliers — so no
    figure has to be rendered just to read ``p['fliers']`` back, and the
    per-flier ``!= flier`` equality passes over the whole frame are
    replaced by one vectorized mask per column.

    :param data: DataFrame of numeric columns.
    :return: filtered copy of ``data``. NaN cells never mark a row for
        removal, matching the original filter (NaN != flier is True).
    """
    keep = pd.Series(True, index=data.index)
    for column in data.columns:
        col = data[column]
        # quantile() skips NaN, like the boxplot statistics did
        q1 = col.quantile(0.25)
        q3 = col.quantile(0.75)
        iqr = q3 - q1
        lower = q1 - 1.5 * iqr
        upper = q3 + 1.5 * iqr
        # between() is False for NaN; keep NaN rows explicitly to preserve
        # the old `data[col] != flier` behavior, which kept them.
        keep &= col.between(lower, upper) | col.isna()
    return data[keep]

print(train.shape)
train = remove_filers_with_boxplot(train)
print(train.shape)

# (Translation of the note below: the per-column outlier handling above is
# not complete on its own — a value may be normal for the whole column yet
# anomalous within its own category, so the data must also be grouped and
# box-plot-cleaned per group.)
'''
以上得異常值處理還不夠完善,
完善的異常值處理是分組判斷異常值,
也就是他在單獨這一列種,還有一種情況是多餘不同的分類,他是不是存在異常
所以就需要用到分組獲取資料再箱圖處理掉異常資料;
'''
# Keep only rows whose buildingTypeId is known;
# idiomatic .notna() replaces the old `pd.isna(...) != True` comparison.
train = train[train['buildingTypeId'].notna()]
print(train.shape)

print(train['bedrooms'].value_counts())
# (Translation of the note below: the classes are imbalanced, so only
# bedrooms 1-5 are kept, i.e. 0, 6 and 7 are dropped — the test data must
# receive the same treatment later; over-/under-sampling would be an
# alternative, not considered here.)
'''
3.0    8760
2.0    5791
4.0    5442
1.0    2056
5.0    1828
6.0     429
0.0     159
7.0      82
由於樣本存在不均衡得問題:所以只採用12345資料:也就是說去掉0,7,6,到時候測試資料也要做相同得操作;
還有一種是通過下采樣或者是上取樣的方式進行,這裡暫時不考慮;
'''
# Keep only rows with bedrooms in {1, 2, 3, 4, 5}
train = train[train['bedrooms'].isin([1,2,3,4,5])]
print(train.shape)

# Remove outliers after grouping per category (pivot-style)
def use_pivot_box_to_remove_fliers(data,pivot_columns_list,pivot_value_list):
    """Drop group-wise box-plot outliers.

    For every grouping column and every value column, remove the rows
    whose value falls outside the box-plot whiskers
    (Q1 - 1.5*IQR, Q3 + 1.5*IQR) *of their own group*.

    Fixes two defects of the plot-based original: it no longer renders a
    boxplot just to read its flier values, and it no longer removes rows
    from other groups that merely share an outlier's value — the old
    ``data[data.loc[:, value] != flier]`` filter was group-agnostic.

    :param data: input DataFrame.
    :param pivot_columns_list: categorical columns to group by.
    :param pivot_value_list: numeric columns to clean within each group.
    :return: the filtered DataFrame.
    """
    for group_col in pivot_columns_list:
        for value_col in pivot_value_list:
            grouped = data.groupby(group_col)[value_col]
            # per-row whisker bounds of the row's own group
            q1 = grouped.transform(lambda s: s.quantile(0.25))
            q3 = grouped.transform(lambda s: s.quantile(0.75))
            iqr = q3 - q1
            within = data[value_col].between(q1 - 1.5 * iqr, q3 + 1.5 * iqr)
            # NaN values were never flagged as fliers, and rows whose group
            # key is NaN never appeared in the pivot — keep both.
            keep = within | data[value_col].isna() | data[group_col].isna()
            data = data[keep]
    return data


# train = use_pivot_box_to_remove_fliers(train,['buildingTypeId','bedrooms'],['price','daysOnMarket','longitude','latitude'])
print(train.shape)
# print(train.isna().sum())

# 以上就不考慮longitude和latitude的問題了;應為房屋的型別以及房間個數和經緯度關係不大,但是也不一定,
# 實踐了一下加上longitude和latitude之後樣本資料並沒有減少;

# sns.pairplot(train)
# plt.show()

# 先進一步做處理將緯度小於40的去掉
train = train[train.latitude>40]

# --------------------------------》》》
# 對於數值型別得用均值填充,但是在填充之前注意一些原本就是分型別資料得列
# def fill_na(data):
#     for column in data.columns:
#         if column.dtype != str:
#             data[column].fillna(data[column].mean())
#     return data

# 以上是異常值,或者是離群點的處理,以及均值填充資料
# 下面將根據catter圖或者是hist圖來處理資料


# # 標準化資料
# train = StandardScaler().fit_transform(train)
# # 標準化之後畫圖發現數據分佈並沒有變
#
# sns.pairplot(pd.DataFrame(train))
# plt.show()

'''
1:迴圈遍歷整個散點圖用剛才寫好的演算法去除點;
'''

# 獲取
# def get_outlier(x,y,init_point_count ,distance,least_point_count):
#     x_outliers_list = []
#     y_outliers_list = []
#     for i in range(len(x)):
#         for j in range(len(x)):
#              d =np.sqrt(np.square(x[i]-x[j])+np.square(y[i]-y[j]))
#              # print('距離',d)
#              if d <= distance:
#                 init_point_count +=1
#         if init_point_count <least_point_count+1:
#             x_outliers_list.append(x[i])
#             y_outliers_list.append(y[i])
#             print(x[i],y[i])
#         init_point_count =0
#     return x_outliers_list,y_outliers_list
#
# def circulation_to_remove_outliers(data,list_columns=['longitude','latitude','price','daysOnMarket',]):
#     for column_row in list_columns:
#         for column_col in list_columns:
#             if column_row != column_col:
#                 x = list(data[column_row])
#                 y = list(data[column_col])
#                 x_outliers_list ,y_outliers_list = get_outlier(x,y,0,0.01,2)
#                 for x_outlier in x_outliers_list:
#                     data = data[data.loc[:, column_row] != x_outlier]
#                 for y_outlier in y_outliers_list:
#                     data = data[data.loc[:, column_col] != y_outlier]
#     return data
#
# train = circulation_to_remove_outliers(train)
#
# print(train.shape)






# def get_outlier(x,y,init_point_count ,distance,least_point_count):
#     for i in range(len(x)):
#         for j in range(len(x)):
#              d =np.sqrt(np.square(x[i]-x[j])+np.square(y[i]-y[j]))
#              # print('距離',d)
#              if d <= distance:
#                 init_point_count +=1
#         if init_point_count <least_point_count+1:
#             print(x[i],y[i])
#         init_point_count =0
#
# get_outlier(train['longitude'],train['latitude'],0,0.3,1)








# sns.pairplot(train)
# plt.show()
# train = train.dropna()
# print(train.tail())
# train.to_csv('./finnl_processing_train_data_6_no_remove_outliers_test.csv',index=False)