資料清洗和準備
阿新 • 發佈:2018-12-18
#資料清洗和準備
import pandas as pd
import numpy as np
#處理缺失值
string_data = pd.Series(['aardvark','artwdfv',np.nan,'asdfaa'])
string_data
0 aardvark
1 artwdfv
2 NaN
3 asdfaa
dtype: object
string_data.isnull()
0 False
1 False
2 True
3 False
dtype: bool
string_data[0] = None
string_data.isnull()
0 True
1 False
2 True
3 False
dtype: bool
#缺失資料處理
#濾除缺失資料
#dropna返回一個僅含非空資料和索引值的Series:
from numpy import nan as NA
data = pd.Series([1,NA,3,4,NA,7])
data.dropna()
0 1.0
2 3.0
3 4.0
5 7.0
dtype: float64
data[data.notnull()]
0 1.0 2 3.0 3 4.0 5 7.0 dtype: float64
#對於DataFrame物件,dropna預設丟棄任何含有缺失值的行
data = pd.DataFrame([[1., 6.5, 3.], [1., NA, NA],
[NA, NA, NA], [NA, 6.5, 3.]])
cleaned = data.dropna()
data
0 | 1 | 2 | |
---|---|---|---|
0 | 1.0 | 6.5 | 3.0 |
1 | 1.0 | NaN | NaN |
2 | NaN | NaN | NaN |
3 | NaN | 6.5 | 3.0 |
cleaned
0 | 1 | 2 | |
---|---|---|---|
0 | 1.0 | 6.5 | 3.0 |
#傳入how='all'將只丟棄全為NA的那些行
data.dropna(how='all')
0 | 1 | 2 | |
---|---|---|---|
0 | 1.0 | 6.5 | 3.0 |
1 | 1.0 | NaN | NaN |
3 | NaN | 6.5 | 3.0 |
data[4] = NA
data
0 | 1 | 2 | 4 | |
---|---|---|---|---|
0 | 1.0 | 6.5 | 3.0 | NaN |
1 | 1.0 | NaN | NaN | NaN |
2 | NaN | NaN | NaN | NaN |
3 | NaN | 6.5 | 3.0 | NaN |
data.dropna(axis=1,how='all') #指定列
0 | 1 | 2 | |
---|---|---|---|
0 | 1.0 | 6.5 | 3.0 |
1 | 1.0 | NaN | NaN |
2 | NaN | NaN | NaN |
3 | NaN | 6.5 | 3.0 |
df = pd.DataFrame(np.random.randn(7,3))
df.iloc[:4,1] = NA
df.iloc[:2,2] = NA
df
0 | 1 | 2 | |
---|---|---|---|
0 | 0.468787 | NaN | NaN |
1 | 0.903261 | NaN | NaN |
2 | 1.453601 | NaN | 1.693059 |
3 | 1.053961 | NaN | -0.147527 |
4 | 0.405867 | 1.042093 | -1.693640 |
5 | -0.416778 | -0.802466 | 2.841372 |
6 | 0.348987 | -1.585632 | 0.061224 |
df.dropna()
0 | 1 | 2 | |
---|---|---|---|
4 | 0.405867 | 1.042093 | -1.693640 |
5 | -0.416778 | -0.802466 | 2.841372 |
6 | 0.348987 | -1.585632 | 0.061224 |
df.dropna(thresh=2)  # thresh=N keeps only ROWS with at least N non-NaN values (not columns)
0 | 1 | 2 | |
---|---|---|---|
2 | 1.453601 | NaN | 1.693059 |
3 | 1.053961 | NaN | -0.147527 |
4 | 0.405867 | 1.042093 | -1.693640 |
5 | -0.416778 | -0.802466 | 2.841372 |
6 | 0.348987 | -1.585632 | 0.061224 |
#填充缺失資料 fillna
df.fillna(0)
0 | 1 | 2 | |
---|---|---|---|
0 | 0.468787 | 0.000000 | 0.000000 |
1 | 0.903261 | 0.000000 | 0.000000 |
2 | 1.453601 | 0.000000 | 1.693059 |
3 | 1.053961 | 0.000000 | -0.147527 |
4 | 0.405867 | 1.042093 | -1.693640 |
5 | -0.416778 | -0.802466 | 2.841372 |
6 | 0.348987 | -1.585632 | 0.061224 |
df.fillna({1:0,2:0.5})
0 | 1 | 2 | |
---|---|---|---|
0 | 0.468787 | 0.000000 | 0.500000 |
1 | 0.903261 | 0.000000 | 0.500000 |
2 | 1.453601 | 0.000000 | 1.693059 |
3 | 1.053961 | 0.000000 | -0.147527 |
4 | 0.405867 | 1.042093 | -1.693640 |
5 | -0.416778 | -0.802466 | 2.841372 |
6 | 0.348987 | -1.585632 | 0.061224 |
_ = df.fillna(0,inplace=True)#對現有物件進行就地修改
df
0 | 1 | 2 | |
---|---|---|---|
0 | 0.468787 | 0.000000 | 0.000000 |
1 | 0.903261 | 0.000000 | 0.000000 |
2 | 1.453601 | 0.000000 | 1.693059 |
3 | 1.053961 | 0.000000 | -0.147527 |
4 | 0.405867 | 1.042093 | -1.693640 |
5 | -0.416778 | -0.802466 | 2.841372 |
6 | 0.348987 | -1.585632 | 0.061224 |
df = pd.DataFrame(np.random.randn(6,3))
df.iloc[2:,1] = NA
df.iloc[4:,2] = NA
df
0 | 1 | 2 | |
---|---|---|---|
0 | 1.813182 | 2.118317 | 0.654455 |
1 | 0.404148 | 0.387881 | -0.082305 |
2 | 0.841433 | NaN | -0.922404 |
3 | -0.569958 | NaN | 1.136830 |
4 | 1.007093 | NaN | NaN |
5 | 1.725698 | NaN | NaN |
df.fillna(method='ffill') #對reindexing有效的那些插值方法也可用於fillna
0 | 1 | 2 | |
---|---|---|---|
0 | 1.813182 | 2.118317 | 0.654455 |
1 | 0.404148 | 0.387881 | -0.082305 |
2 | 0.841433 | 0.387881 | -0.922404 |
3 | -0.569958 | 0.387881 | 1.136830 |
4 | 1.007093 | 0.387881 | 1.136830 |
5 | 1.725698 | 0.387881 | 1.136830 |
#資料轉換
#移除重複資料
# Build a small frame with one fully duplicated row (index 6 repeats index 5)
# for the drop_duplicates examples. The original had a redundant
# "data = data =" double assignment — a single assignment is sufficient.
data = pd.DataFrame({'k1': ['one', 'two'] * 3 + ['two'],
                     'k2': [1, 1, 2, 3, 3, 4, 4]})
data
k1 | k2 | |
---|---|---|
0 | one | 1 |
1 | two | 1 |
2 | one | 2 |
3 | two | 3 |
4 | one | 3 |
5 | two | 4 |
6 | two | 4 |
data.duplicated()
0 False
1 False
2 False
3 False
4 False
5 False
6 True
dtype: bool
data.drop_duplicates()
k1 | k2 | |
---|---|---|
0 | one | 1 |
1 | two | 1 |
2 | one | 2 |
3 | two | 3 |
4 | one | 3 |
5 | two | 4 |
data['v1'] = range(7)
data.drop_duplicates(['k1'])
k1 | k2 | v1 | |
---|---|---|---|
0 | one | 1 | 0 |
1 | two | 1 | 1 |
data.drop_duplicates(['k1','k2'],keep='last')#傳入keep='last'則保留最後一個:
k1 | k2 | v1 | |
---|---|---|---|
0 | one | 1 | 0 |
1 | two | 1 | 1 |
2 | one | 2 | 2 |
3 | two | 3 | 3 |
4 | one | 3 | 4 |
6 | two | 4 | 6 |
#利用函式或對映進行資料轉換
data = pd.DataFrame({'food': ['bacon', 'pulled pork', 'bacon',
'Pastrami', 'corned beef', 'Bacon',
'pastrami', 'honey ham', 'nova lox'],
'ounces': [4, 3, 12, 6, 7.5, 8, 3, 5, 6]})
data
food | ounces | |
---|---|---|
0 | bacon | 4.0 |
1 | pulled pork | 3.0 |
2 | bacon | 12.0 |
3 | Pastrami | 6.0 |
4 | corned beef | 7.5 |
5 | Bacon | 8.0 |
6 | pastrami | 3.0 |
7 | honey ham | 5.0 |
8 | nova lox | 6.0 |
meat_to_animal = {
'bacon': 'pig',
'pulled pork': 'pig',
'pastrami': 'cow',
'corned beef': 'cow',
'honey ham': 'pig',
'nova lox': 'salmon'
}
lowercased = data['food'].str.lower()
lowercased
0 bacon
1 pulled pork
2 bacon
3 pastrami
4 corned beef
5 bacon
6 pastrami
7 honey ham
8 nova lox
Name: food, dtype: object
data['animal'] = lowercased.map(meat_to_animal)#Series的map方法可以接受一個函式或含有對映關係的字典型物件
data
food | ounces | animal | |
---|---|---|---|
0 | bacon | 4.0 | pig |
1 | pulled pork | 3.0 | pig |
2 | bacon | 12.0 | pig |
3 | Pastrami | 6.0 | cow |
4 | corned beef | 7.5 | cow |
5 | Bacon | 8.0 | pig |
6 | pastrami | 3.0 | cow |
7 | honey ham | 5.0 | pig |
8 | nova lox | 6.0 | salmon |
data['food'].map(lambda x :meat_to_animal[x.lower()])
0 pig
1 pig
2 pig
3 cow
4 cow
5 pig
6 cow
7 pig
8 salmon
Name: food, dtype: object
#替換值
data = pd.Series([1,-999,2,-999,-1000,3])
data
0 1
1 -999
2 2
3 -999
4 -1000
5 3
dtype: int64
data.replace(-999,np.nan)
0 1.0
1 NaN
2 2.0
3 NaN
4 -1000.0
5 3.0
dtype: float64
data.replace([-999,-1000],np.nan)
0 1.0
1 NaN
2 2.0
3 NaN
4 NaN
5 3.0
dtype: float64
data.replace({-999:np.nan,-1000:0})
0 1.0
1 NaN
2 2.0
3 NaN
4 0.0
5 3.0
dtype: float64
#重新命名軸索引
data = pd.DataFrame(np.arange(12).reshape((3, 4)),
index=['Ohio', 'Colorado', 'New York'],
columns=['one', 'two', 'three', 'four'])
transform = lambda x:x[:4].upper()
data.index.map(transform)
Index(['OHIO', 'COLO', 'NEW '], dtype='object')
data.index = data.index.map(transform)
data
one | two | three | four | |
---|---|---|---|---|
OHIO | 0 | 1 | 2 | 3 |
COLO | 4 | 5 | 6 | 7 |
NEW | 8 | 9 | 10 | 11 |
data.rename(index=str.title,columns=str.upper)
ONE | TWO | THREE | FOUR | |
---|---|---|---|---|
Ohio | 0 | 1 | 2 | 3 |
Colo | 4 | 5 | 6 | 7 |
New | 8 | 9 | 10 | 11 |
data.rename(index={'OHIO': 'INDIANA'},
columns={'three': 'peekaboo'})
one | two | peekaboo | four | |
---|---|---|---|---|
INDIANA | 0 | 1 | 2 | 3 |
COLO | 4 | 5 | 6 | 7 |
NEW | 8 | 9 | 10 | 11 |
#rename可以實現複製DataFrame並對其索引和列標籤進行賦值
#修改某個資料集,傳入inplace=True即可
data.rename(index={'OHIO': 'INDIANA'}, inplace=True)
data
one | two | three | four | |
---|---|---|---|---|
INDIANA | 0 | 1 | 2 | 3 |
COLO | 4 | 5 | 6 | 7 |
NEW | 8 | 9 | 10 | 11 |
#離散化和面元劃分
ages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32]
bins = [18,25,35,60,100]
cats = pd.cut(ages,bins)
cats
[(18, 25], (18, 25], (18, 25], (25, 35], (18, 25], ..., (25, 35], (60, 100], (35, 60], (35, 60], (25, 35]]
Length: 12
Categories (4, interval[int64]): [(18, 25] < (25, 35] < (35, 60] < (60, 100]]
cats.codes
array([0, 0, 0, 1, 0, 0, 2, 1, 3, 2, 2, 1], dtype=int8)
cats.categories
IntervalIndex([(18, 25], (25, 35], (35, 60], (60, 100]]
closed='right',
dtype='interval[int64]')
pd.value_counts(cats)
(18, 25] 5
(35, 60] 3
(25, 35] 3
(60, 100] 1
dtype: int64
pd.cut(ages,[18, 26, 36, 61, 100],right=False) #修改開閉端
[[18, 26), [18, 26), [18, 26), [26, 36), [18, 26), ..., [26, 36), [61, 100), [36, 61), [36, 61), [26, 36)]
Length: 12
Categories (4, interval[int64]): [[18, 26) < [26, 36) < [36, 61) < [61, 100)]
group_names = ['Youth', 'YoungAdult', 'MiddleAged', 'Senior']
pd.cut(ages,bins,labels=group_names)#設定自己的面元名稱
[Youth, Youth, Youth, YoungAdult, Youth, ..., YoungAdult, Senior, MiddleAged, MiddleAged, YoungAdult]
Length: 12
Categories (4, object): [Youth < YoungAdult < MiddleAged < Senior]
data = np.random.randn(20)
data
array([ 1.91724059, 0.71063941, -0.61160619, -0.83774853, -0.30427484,
-0.13651668, 0.12231811, 1.02349581, 0.44230242, 2.5811469 ,
0.84007075, -0.40956094, 1.87198738, -1.69861267, -0.52190509,
-0.1944561 , -0.44986769, 0.64421648, 1.96899093, 0.04159415])
#資料的最小值和最大值計算等長面元。下面這個例子中,我們將一些均勻分佈的資料分成四組
pd.cut(data,4,precision=2) #選項precision=2,限定小數只有兩位
[(1.51, 2.58], (0.44, 1.51], (-0.63, 0.44], (-1.7, -0.63], (-0.63, 0.44], ..., (-0.63, 0.44], (-0.63, 0.44], (0.44, 1.51], (1.51, 2.58], (-0.63, 0.44]]
Length: 20
Categories (4, interval[float64]): [(-1.7, -0.63] < (-0.63, 0.44] < (0.44, 1.51] < (1.51, 2.58]]
#qcut根據樣本分位數對資料進行面元劃分
data = np.random.randn(1000)
cats = pd.qcut(data,4) #四分位點
cats
[(-0.65, 0.0814], (-0.65, 0.0814], (0.0814, 0.727], (0.0814, 0.727], (-2.875, -0.65], ..., (0.0814, 0.727], (-2.875, -0.65], (-0.65, 0.0814], (-0.65, 0.0814], (-0.65, 0.0814]]
Length: 1000
Categories (4, interval[float64]): [(-2.875, -0.65] < (-0.65, 0.0814] < (0.0814, 0.727] < (0.727, 3.834]]
pd.value_counts(cats)
(0.727, 3.834] 250
(0.0814, 0.727] 250
(-0.65, 0.0814] 250
(-2.875, -0.65] 250
dtype: int64
#傳遞自定義的分位數
pd.qcut(data,[0, 0.1, 0.5, 0.9, 1.])
[(-1.237, 0.0814], (-1.237, 0.0814], (0.0814, 1.324], (0.0814, 1.324], (-2.875, -1.237], ..., (0.0814, 1.324], (-1.237, 0.0814], (-1.237, 0.0814], (-1.237, 0.0814], (-1.237, 0.0814]]
Length: 1000
Categories (4, interval[float64]): [(-2.875, -1.237] < (-1.237, 0.0814] < (0.0814, 1.324] < (1.324, 3.834]]
#檢測和過濾異常值
data = pd.DataFrame(np.random.randn(1000,4))
data.describe()
0 | 1 | 2 | 3 | |
---|---|---|---|---|
count | 1000.000000 | 1000.000000 | 1000.000000 | 1000.000000 |
mean | -0.088724 | 0.021011 | 0.043887 | 0.006012 |
std | 0.990026 | 0.982459 | 0.970484 | 1.013532 |
min | -3.417757 | -3.501364 | -2.653510 | -3.266161 |
25% | -0.722939 | -0.618738 | -0.637500 | -0.723452 |
50% | -0.070858 | 0.047673 | 0.011295 | 0.017201 |
75% | 0.578929 | 0.689053 | 0.735396 | 0.685065 |
max | 2.695907 | 3.217885 | 3.304064 | 3.158566 |
col = data[2]
col[np.abs(col)>3]
583 3.304064
Name: 2, dtype: float64
data[(np.abs(data) > 3).any(axis=1)]  # select rows containing any |value| > 3; axis passed by keyword (positional axis is deprecated)
0 | 1 | 2 | 3 | |
---|---|---|---|---|
37 | -0.327884 | 2.157466 | -0.043636 | 3.073042 |
152 | -3.417757 | -0.061750 | -0.935451 | -0.627025 |
175 | 0.578744 | -0.562655 | -1.122764 | 3.140705 |
232 | -3.108754 | 0.673518 | 0.165646 | 0.924763 |
292 | 1.270998 | 3.217885 | 0.172434 | -0.872227 |
417 | 0.705947 | -0.002233 | 1.380826 | -3.266161 |
487 | -3.008020 | -0.298071 | -0.048238 | 0.680068 |
512 | 0.165514 | -3.501364 | -1.157821 | 0.817954 |
583 | -1.525473 | -1.329746 | 3.304064 | -2.202428 |
813 | -0.230513 | 0.459634 | 0.130212 | 3.158566 |
#排列和隨機取樣
df = pd.DataFrame(np.arange(5*4).reshape(5,4))
sampler = np.random.permutation(5) #隨機重排序
sampler
array([2, 3, 0, 1, 4])
df
0 | 1 | 2 | 3 | |
---|---|---|---|---|
0 | 0 | 1 | 2 | 3 |
1 | 4 | 5 | 6 | 7 |
2 | 8 | 9 | 10 | 11 |
3 | 12 | 13 | 14 | 15 |
4 | 16 | 17 | 18 | 19 |
df.take(sampler)
0 | 1 | 2 | 3 | |
---|---|---|---|---|
2 | 8 | 9 | 10 | 11 |
3 | 12 | 13 | 14 | 15 |
0 | 0 | 1 | 2 | 3 |
1 | 4 | 5 | 6 | 7 |
4 | 16 | 17 | 18 | 19 |
df.sample(n=3)#不用替換的方式選取隨機子集
0 | 1 | 2 | 3 | |
---|---|---|---|---|
0 | 0 | 1 | 2 | 3 |
3 | 12 | 13 | 14 | 15 |
1 | 4 | 5 | 6 | 7 |
choices = pd.Series([5, 7, -1, 6, 4])
draws = choices.sample(n=10,replace=True)#允許重複選擇
draws
0 5
0 5
1 7
3 6
2 -1
1 7
4 4
2 -1
2 -1
1 7
dtype: int64
#計算指標/啞變數:將分類變數(categorical variable)轉換為“啞變數”或“指標矩陣”。
df = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'],
'data1': range(6)})
df
key | data1 | |
---|---|---|
0 | b | 0 |
1 | b | 1 |
2 | a | 2 |
3 | c | 3 |
4 | a | 4 |
5 | b | 5 |
pd.get_dummies(df['key'])
a | b | c | |
---|---|---|---|
0 | 0 | 1 | 0 |
1 | 0 | 1 | 0 |
2 | 1 | 0 | 0 |
3 | 0 | 0 | 1 |
4 | 1 | 0 | 0 |
5 | 0 | 1 | 0 |
#給指標DataFrame的列加上一個字首
dummies = pd.get_dummies(df['key'], prefix='key')
df_with_dummy = df[['data1']].join(dummies)
df_with_dummy
data1 | key_a | key_b | key_c | |
---|---|---|---|---|
0 | 0 | 0 | 1 | 0 |
1 | 1 | 0 | 1 | 0 |
2 | 2 | 1 | 0 | 0 |
3 | 3 | 0 | 0 | 1 |
4 | 4 | 1 | 0 | 0 |
5 | 5 | 0 | 1 | 0 |