spark MLlib DataType — ML 中的數據類型

spark MLlib DataType ML中的數據類型

本文示範 Spark MLlib(RDD-based API)的基本數據類型:本地向量、標記點(LabeledPoint)、本地矩陣,以及四種分佈式矩陣。

package ML.DataType;


import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.linalg.*;
import org.apache.spark.mllib.linalg.distributed.*;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;

import java.util.Arrays;

/**
 * Demo of the basic Spark MLlib (RDD-based API) data types: local vectors,
 * labeled points, local matrices, and the four distributed matrix types
 * (RowMatrix, IndexedRowMatrix, CoordinateMatrix, BlockMatrix).
 *
 * @ClassName: DataType
 * @author: DingH
 * @since: 2019/4/3 10:06
 */
public class DataType {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("Datatype");
        JavaSparkContext javaSparkContext = new JavaSparkContext(conf);
        try {
            /*
             * Local vectors: Vectors.dense takes the values directly;
             * Vectors.sparse takes the vector size, the indices of the
             * non-zero entries, and the corresponding values.
             */
            Vector dense = Vectors.dense(1.0, 0.0, 3.0);
            Vector sparse = Vectors.sparse(3, new int[]{0, 2}, new double[]{1.0, 3.0});

            // A LabeledPoint tags a feature vector with a label: 1.0 = positive, 0.0 = negative.
            LabeledPoint positivePoint = new LabeledPoint(1.0, dense);
            LabeledPoint negativePoint = new LabeledPoint(0.0, sparse);

            // libSVM text format: label index1:value1 index2:value2 ...
            // NOTE(review): "/data..." is a placeholder path; point it at a real file before running.
            JavaRDD<LabeledPoint> labeledPoints =
                    MLUtils.loadLibSVMFile(javaSparkContext.sc(), "/data...").toJavaRDD();

            /*
             * Matrices.dense builds a 3x2 local matrix. Values are COLUMN-major
             * (the original comment read them row-major), so this is:
             * [1.0 2.0
             *  3.0 4.0
             *  5.0 6.0]
             */
            Matrix denseMatrix = Matrices.dense(3, 2, new double[]{1.0, 3.0, 5.0, 2.0, 4.0, 6.0});

            /*
             * Matrices.sparse builds a 3x2 local matrix in CSC form:
             * column pointers {0,1,3}, row indices {0,2,1}, values {9,6,8}.
             * Column 0 holds 9 at row 0; column 1 holds 6 at row 2 and 8 at row 1:
             * [9 0
             *  0 8
             *  0 6]
             */
            Matrix sparseMatrix = Matrices.sparse(3, 2, new int[]{0, 1, 3}, new int[]{0, 2, 1},
                    new double[]{9, 6, 8});

            // RowMatrix: a distributed matrix of rows without meaningful row indices.
            JavaRDD<Vector> vectorRdd = javaSparkContext.parallelize(Arrays.asList(
                    Vectors.dense(1, 2, 3),
                    Vectors.dense(2, 3, 4),
                    Vectors.dense(3, 4, 5)));
            RowMatrix rowMatrix = new RowMatrix(vectorRdd.rdd());
            long rowMatrixRows = rowMatrix.numRows();
            long rowMatrixCols = rowMatrix.numCols();
            // tallSkinnyQR(true) computes Q as well as R.
            QRDecomposition<RowMatrix, Matrix> qr = rowMatrix.tallSkinnyQR(true);

            // IndexedRowMatrix: like RowMatrix, but each row carries a long index.
            JavaRDD<IndexedRow> indexedRows = javaSparkContext.parallelize(Arrays.asList(
                    new IndexedRow(1, dense),
                    new IndexedRow(2, dense),
                    new IndexedRow(3, dense)));
            IndexedRowMatrix indexedRowMatrix = new IndexedRowMatrix(indexedRows.rdd());
            long indexedCols = indexedRowMatrix.numCols();
            long indexedRowCount = indexedRowMatrix.numRows();
            RowMatrix fromIndexed = indexedRowMatrix.toRowMatrix();

            // CoordinateMatrix: entries stored as (row, col, value) triples — suited to very sparse data.
            JavaRDD<MatrixEntry> entries = javaSparkContext.parallelize(Arrays.asList(
                    new MatrixEntry(0, 1, 3),
                    new MatrixEntry(1, 3, 1),
                    new MatrixEntry(2, 1, 1)));
            CoordinateMatrix coordinateMatrix = new CoordinateMatrix(entries.rdd());
            long coordinateCols = coordinateMatrix.numCols();
            long coordinateRows = coordinateMatrix.numRows();
            IndexedRowMatrix fromCoordinate = coordinateMatrix.toIndexedRowMatrix();

            // BlockMatrix: a grid of local matrix blocks; supports distributed multiply.
            BlockMatrix blockMatrix = indexedRowMatrix.toBlockMatrix().cache();
            BlockMatrix blockMatrix1 = coordinateMatrix.toBlockMatrix().cache();
            blockMatrix.validate(); // verifies block dimensions are consistent
            BlockMatrix product = blockMatrix.transpose().multiply(blockMatrix);
        } finally {
            // Fix: the original never stopped the context, leaking the local Spark runtime.
            javaSparkContext.stop();
        }
    }
}

spark MLlib DataType ML中的數據類型