Grouped aggregation with multiprocessing
阿新 · Published: 2018-10-31
1. Create the data.
import pandas as pd
import numpy as np
import uuid
import random

def get_id():
    return uuid.uuid1()

# Build roughly 1.5 million rows: every id gets one row with values, and about
# half of the ids get a second row whose 'high'/'breadth' are missing.
all_data = []
for _ in range(1000000):
    now_id = get_id()
    all_data.append([now_id, now_id, 3, 4])
    if random.randint(0, 1):
        all_data.append([now_id, now_id, None, None])

data = pd.DataFrame(all_data)
data.columns = ['name', 'age', 'high', 'breadth']
print('done')
2. Grouped aggregation.
import time
import bottleneck as bk
import multiprocessing

# Parallel path, left commented out in the original post:
# def do_pool(func, args):
#     pool = multiprocessing.Pool(2)
#     pool_res = pool.map(func, args)
#     pool.close()
#     pool.join()
#     return pool_res

# def agg_t(df):
#     return df.agg(['max'])

start = time.time()
print('Start aggregation!')

# Single-process baseline: aggregate every (name, age) group with bottleneck's nanmin.
grouped = data.groupby(['name', 'age'])
data_grouped = grouped.agg([bk.nanmin])

# Keep only the groups that actually need aggregating (more than one row):
# tobe_agg = [group for name, group in grouped if len(group) > 1]
# print(len(tobe_agg))

print(time.time() - start)
# do_pool(agg_t, tobe_agg)
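The title promises multiprocessing, but the pool path above is left commented out. For reference, here is a minimal, self-contained sketch of how that path could be wired up, based on the commented-out do_pool and agg_t helpers (with agg_t fixed to use its df argument). The toy DataFrame and the __main__ guard are my additions so the example runs on its own; they are not part of the original post.

import multiprocessing

import pandas as pd


def agg_t(df):
    # Aggregate one group; runs inside a worker process.
    return df.agg(['max'])


def do_pool(func, args, processes=2):
    # Fan the per-group work out over a small process pool,
    # mirroring the commented-out do_pool in the post.
    pool = multiprocessing.Pool(processes)
    try:
        results = pool.map(func, args)
    finally:
        pool.close()
        pool.join()
    return results


if __name__ == '__main__':
    # Small stand-in for the DataFrame built in step 1.
    data = pd.DataFrame(
        [['a', 1, 3, 4], ['a', 1, None, None], ['b', 2, 5, 6]],
        columns=['name', 'age', 'high', 'breadth'],
    )
    # Only groups with more than one row need aggregating, as in tobe_agg above.
    tobe_agg = [group for name, group in data.groupby(['name', 'age'])
                if len(group) > 1]
    print(pd.concat(do_pool(agg_t, tobe_agg)))

Whether this actually beats the single-process groupby depends on the data: every group DataFrame has to be pickled and sent to a worker, so with many tiny groups the serialization overhead can easily outweigh the aggregation itself.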