
Recommendation Systems 1 --- Bandits

import numpy as np
import matplotlib.pyplot as plt
import math
# number of bandit problem instances
number_of_bandits=10
# number of arms per bandit
number_of_arms=10
# number of pulls per bandit
number_of_pulls=10000
# exploration rate for the epsilon-greedy strategies
epsilon=0.3
# lower bound that epsilon decays towards
min_temp = 0.1
# multiplicative decay rate applied to epsilon
decay_rate=0.999

def pick_arm(q_values,counts,strategy,success,failure):
	global epsilon
	# random: pick an arm uniformly at random
	if strategy=="random":
		return np.random.randint(0,len(q_values))
	# greedy: always pick the arm with the highest estimated value
	if strategy=="greedy":
		best_arms_value = np.max(q_values)
		# collect every arm tied for the best value and break ties at random
		best_arms = np.argwhere(q_values==best_arms_value).flatten()
		return best_arms[np.random.randint(0,len(best_arms))]
	# epsilon-greedy: epsilon stays fixed for "egreedy" and decays over time for "egreedy_decay"
	if strategy=="egreedy" or strategy=="egreedy_decay":
		if strategy=="egreedy_decay":
			epsilon=max(epsilon*decay_rate,min_temp)
		if np.random.random() > epsilon:
			best_arms_value = np.max(q_values)
			best_arms = np.argwhere(q_values==best_arms_value).flatten()
			return best_arms[np.random.randint(0,len(best_arms))]
		else:
			return np.random.randint(0,len(q_values))
	# UCB: score each arm by its estimated value plus an exploration bonus and pick the highest score
	if strategy=="ucb":
		total_counts = np.sum(counts)
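		# exploration bonus: sqrt(2*ln(total pulls) / pulls of this arm); the +0.001 and
		# +1.0 keep the expression defined before every arm has been pulled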
		q_values_ucb = q_values + np.sqrt(np.reciprocal(counts+0.001)*2*math.log(total_counts+1.0))
		best_arms_value = np.max(q_values_ucb)
		best_arms = np.argwhere(q_values_ucb==best_arms_value).flatten()
		return best_arms[np.random.randint(0,len(best_arms))]
	# Thompson sampling: draw a sample from each arm's Beta posterior and pick the arm with the largest sample
	if strategy=="thompson":
		sample_means = np.zeros(len(counts))
		for i in range(len(counts)):
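			# Beta(successes+1, failures+1) is the posterior over the arm's success probability under a uniform Beta(1,1) prior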
			sample_means[i]=np.random.beta(success[i]+1,failure[i]+1)
		return np.argmax(sample_means)
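
A quick standalone sanity check of pick_arm before running the full experiment (not part of the original script; the three-arm statistics below are made up purely for illustration, and "egreedy_decay" is left out so the global epsilon is not decayed before the experiment starts):

# toy example: three arms with hand-picked statistics, just to see each strategy return an arm index
q_demo = np.array([0.1, 0.5, 0.2])
counts_demo = np.array([3.0, 5.0, 2.0])
success_demo = np.array([1.0, 3.0, 0.0])
failure_demo = np.array([2.0, 2.0, 2.0])
for demo_strategy in ["random", "greedy", "egreedy", "ucb", "thompson"]:
	print(demo_strategy, pick_arm(q_demo, counts_demo, demo_strategy, success_demo, failure_demo))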


fig = plt.figure()
ax = fig.add_subplot(111)
for st in ["greedy","random","egreedy","egreedy_decay","ucb","thompson"]:

	# array of shape (number_of_bandits, number_of_pulls) recording the % of pulls spent on the best arm
	best_arm_counts = np.zeros((number_of_bandits,number_of_pulls))

	# for each bandit problem instance
	for i in range(number_of_bandits):
		# draw a random success probability for each arm and remember which arm is truly best
		arm_means = np.random.rand(number_of_arms)
		best_arm = np.argmax(arm_means)
		# estimated value of each arm
		q_values = np.zeros(number_of_arms)
		# number of times each arm has been pulled
		counts = np.zeros(number_of_arms)
		# number of successes per arm
		success=np.zeros(number_of_arms)
		# number of failures per arm
		failure=np.zeros(number_of_arms)
		
		# for each pull
		for j in range(number_of_pulls):
			# choose arm a according to the current strategy
			a = pick_arm(q_values,counts,st,success,failure)

			# sample a Bernoulli reward with success probability arm_means[a]
			reward = np.random.binomial(1,arm_means[a])
			# increment the pull count of arm a
			counts[a]+=1.0
			# incremental update of arm a's running mean reward: new_mean = old_mean + (reward - old_mean)/n
			q_values[a]+= (reward-q_values[a])/counts[a]
			# record a success
			success[a]+=reward
			# record a failure
			failure[a]+=(1-reward)
			# percentage of pulls so far that went to the truly best arm
			best_arm_counts[i][j] = counts[best_arm]*100.0/(j+1)
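		# reset epsilon so the next bandit starts with the original exploration rate (egreedy_decay may have lowered it)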
		epsilon=0.3

	# average across bandits: mean % of optimal pulls at each step
	ys = np.mean(best_arm_counts,axis=0)
	xs = range(len(ys))
	ax.plot(xs, ys,label = st)

plt.xlabel('Steps')
plt.ylabel('Optimal pulls (%)')

plt.tight_layout()
plt.legend()
plt.ylim((0,110))
plt.show()