Spark: AHP (Analytic Hierarchy Process) Customer Value Scoring
阿新 • Published: 2020-11-21
1. What is AHP
RFM clusters customers into value groups, but customers within the same group are not differentiated by how valuable they are. AHP therefore scores the customers inside each group so that customers of different value can be told apart.
What is AHP: https://baike.baidu.com/item/%E5%B1%82%E6%AC%A1%E5%88%86%E6%9E%90%E6%B3%95/1672?fr=aladdin and https://tellyouwhat.cn/p/ahp-users-value-score/
AHP stands for the Analytic Hierarchy Process (also known as hierarchical analysis).
Compute an AHP score for each user, and rank customers within the same class produced by the RFM clustering. The method has three steps:
1. Build the hierarchy model
2. Construct the pairwise comparison matrix
3. Compute the weight vector and run a consistency check (a minimal sketch of steps 2 and 3 follows right after this list)
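The sketch below is not taken from the linked repository; it illustrates steps 2 and 3 for the three RFM indicators under assumed inputs. The 3×3 pairwise comparison matrix uses hypothetical judgments on Saaty's 1–9 scale, the weight vector is approximated by normalizing the row geometric means, and consistency is checked via CR = CI / RI (RI = 0.58 for a 3×3 matrix); the matrix is usually accepted when CR < 0.1.

import java.util.Arrays;

public class AhpWeights {

    public static void main(String[] args) {
        // Hypothetical pairwise comparison matrix for (R, F, M): a[i][j] says how much more
        // important indicator i is than indicator j on Saaty's 1-9 scale (assumed values).
        double[][] a = {
                {1.0,       1.0 / 3.0, 1.0 / 5.0},
                {3.0,       1.0,       1.0 / 3.0},
                {5.0,       3.0,       1.0}
        };
        int n = a.length;

        // Step 3a: approximate the principal eigenvector with row geometric means, then normalize.
        double[] weights = new double[n];
        double sum = 0.0;
        for (int i = 0; i < n; i++) {
            double prod = 1.0;
            for (int j = 0; j < n; j++) {
                prod *= a[i][j];
            }
            weights[i] = Math.pow(prod, 1.0 / n);
            sum += weights[i];
        }
        for (int i = 0; i < n; i++) {
            weights[i] /= sum;
        }

        // Step 3b: consistency check. lambdaMax is estimated from A * w,
        // CI = (lambdaMax - n) / (n - 1), and CR = CI / RI with RI = 0.58 for n = 3.
        double lambdaMax = 0.0;
        for (int i = 0; i < n; i++) {
            double rowDot = 0.0;
            for (int j = 0; j < n; j++) {
                rowDot += a[i][j] * weights[j];
            }
            lambdaMax += rowDot / (n * weights[i]);
        }
        double ci = (lambdaMax - n) / (n - 1);
        double ri = 0.58;
        double cr = ci / ri;

        System.out.println("weights (R, F, M) = " + Arrays.toString(weights));
        System.out.println("lambdaMax = " + lambdaMax + ", CI = " + ci + ", CR = " + cr
                + (cr < 0.1 ? " -> consistent" : " -> revise the matrix"));
    }
}

For this assumed matrix the normalized weights come out to roughly (0.105, 0.258, 0.637) for (R, F, M) and CR ≈ 0.03, so the judgments pass the consistency check.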
Goal:
Rank customers of the same value class within the RFM clusters
Use the R, F, and M indicators from the RFM model
Compute an AHP score for each user (and rank customers of the same value class by their AHP scores)
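For example (using the weights from the assumed matrix above, not values from the original post): if the weight vector for (R, F, M) is [0.105, 0.258, 0.637] and a user's min-max scaled RFM vector is [0.12, 0.05, 0.30], the AHP score is the dot product 0.105 × 0.12 + 0.258 × 0.05 + 0.637 × 0.30 ≈ 0.217; users in the same cluster are then ranked by this score.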
2. Data
The data comes from: Spark RFM customer value clustering and mining (https://www.cnblogs.com/little-horse/p/14014812.html)
3. Code (Spark 3.0, Java 1.8)
For the full code, see AHP customer value scoring (https://github.com/jiangnanboy/spark_tutorial).
// Imports required by this method:
import java.util.List;

import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.ml.linalg.SQLDataTypes;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import static org.apache.spark.sql.functions.col;

/**
 * RFM clustering splits users into high-value, average, low-value groups, etc.
 * To rank users within the same RFM cluster, the AHP weight vector is used to compute a final
 * score for each user: the AHP score is the dot product of the user's scaled RFM vector and
 * the weight vector.
 * @param dataset data after RFM clustering
 * @param weightVector the AHP weight vector
 */
public static void ahpScore(Dataset<Row> dataset, List<Double> weightVector) {
    /**
     * Compute the AHP score for each user:
     * +----------+------------------+--------------------+----------+--------------------+
     * |customerid|          features|      scaledfeatures|prediction|            ahpscore|
     * +----------+------------------+--------------------+----------+--------------------+
     * |     12940| [46.0,4.0,876.29]|[0.12332439678284...|         1|0.024241021827781713|
     * |     13285|[23.0,4.0,2709.12]|[0.06166219839142...|         1|0.023847531248595018|
     * |     13623| [30.0,7.0,672.44]|[0.08042895442359...|         1|0.024049650279212683|
     * |     13832|  [17.0,2.0,40.95]|[0.04557640750670...|         1|0.014321280782467466|
     * |     14450|[180.0,3.0,483.25]|[0.48257372654155...|         0| 0.04870738944845504|
     * +----------+------------------+--------------------+----------+--------------------+
     */
    dataset = dataset.map((MapFunction<Row, Row>) row -> {
        int customerID = row.getInt(0);
        Vector featureVec = (Vector) row.get(1);
        Vector scaledFeatureVec = (Vector) row.get(2);
        int prediction = row.getInt(3);
        // AHP score = dot product of the min-max scaled RFM vector and the weight vector
        double ahpScore = 0.0;
        for (int i = 0; i < weightVector.size(); i++) {
            ahpScore += weightVector.get(i) * scaledFeatureVec.apply(i);
        }
        return RowFactory.create(customerID,
                Vectors.dense(new double[]{featureVec.apply(0), featureVec.apply(1), featureVec.apply(2)}),
                Vectors.dense(new double[]{scaledFeatureVec.apply(0), scaledFeatureVec.apply(1), scaledFeatureVec.apply(2)}),
                prediction,
                ahpScore);
    }, RowEncoder.apply(new StructType(new StructField[]{
            new StructField("customerid", DataTypes.IntegerType, false, Metadata.empty()),         // user id
            new StructField("features", SQLDataTypes.VectorType(), false, Metadata.empty()),        // RFM feature vector
            new StructField("scaledfeatures", SQLDataTypes.VectorType(), false, Metadata.empty()),  // min-max scaled RFM feature vector
            new StructField("prediction", DataTypes.IntegerType, false, Metadata.empty()),          // predicted value cluster of the user
            new StructField("ahpscore", DataTypes.DoubleType, false, Metadata.empty())              // the user's value score
    })));

    /**
     * Rank users within the same value cluster by ahpscore:
     * +----------+--------------------+--------------------+----------+------------------+----+
     * |customerid|            features|      scaledfeatures|prediction|          ahpscore|rank|
     * +----------+--------------------+--------------------+----------+------------------+----+
     * |     14646|[1.0,77.0,279489.02]|[0.00268096514745...|         1|0.7306140418787522|   1|
     * |     18102|[0.0,62.0,256438.49]|[0.0,0.2469635627...|         1|0.6609787921304062|   2|
     * |     14911|[1.0,248.0,132572...|[0.00268096514745...|         1|0.5933314030496094|   3|
     * |     17450|[8.0,55.0,187482.17]|[0.02144772117962...|         1|0.4982050472344627|   4|
     * |     14156|[9.0,66.0,113384.14]|[0.02412868632707...|         1|0.3430011157923704|   5|
     * +----------+--------------------+--------------------+----------+------------------+----+
     */
    dataset = dataset.withColumn("rank",
            functions.rank().over(Window.partitionBy("prediction").orderBy(col("ahpscore").desc())));
    dataset.show(5);
}
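A minimal usage sketch follows, under these assumptions: ahpScore is the method shown above and is available to the caller (e.g. defined in the same class), the clustered input has the schema used above (customerid, features, scaledfeatures, prediction) as produced in the linked RFM post, and the input path and the (R, F, M) weights are placeholders rather than values from the original post.

import java.util.Arrays;
import java.util.List;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class AhpScoreDemo {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("ahp-customer-value-score")
                .master("local[*]")
                .getOrCreate();

        // Assumed input: the RFM-clustered dataset with columns
        // customerid, features, scaledfeatures, prediction (the path is a placeholder).
        Dataset<Row> rfmClustered = spark.read().parquet("path/to/rfm_clustered");

        // Placeholder (R, F, M) weights, e.g. the normalized weights of a
        // consistency-checked pairwise comparison matrix.
        List<Double> weightVector = Arrays.asList(0.105, 0.258, 0.637);

        // Score every user and rank them within their predicted value cluster;
        // ahpScore is the method shown above (assumed to be defined in this class).
        ahpScore(rfmClustered, weightVector);

        spark.stop();
    }
}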