I have an RDD with a tuple of values (String, SparseVector) and I want to create a DataFrame using the RDD. To get a (label, features) DataFrame with the correct column types, what do I need to do?
You have to use VectorUDT to describe a vector column in a DataFrame schema, like this:
# Build a DataFrame with a double "label" column and a vector "features" column.
# In Spark 1.x the vector classes lived in the mllib package:
# from pyspark.mllib.linalg import SparseVector, VectorUDT
from pyspark.ml.linalg import SparseVector, VectorUDT
# StructType/StructField/DoubleType must be imported too, or this snippet
# fails with NameError when the schema is built.
from pyspark.sql.types import StructType, StructField, DoubleType

temp_rdd = sc.parallelize([
    (0.0, SparseVector(4, {1: 1.0, 3: 5.5})),
    (1.0, SparseVector(4, {0: -1.0, 2: 0.5}))])

# VectorUDT() is the user-defined type that maps SparseVector/DenseVector
# to the SQL "vector" type in the schema.
schema = StructType([
    StructField("label", DoubleType(), True),
    StructField("features", VectorUDT(), True)
])

temp_rdd.toDF(schema).printSchema()
## root
## |-- label: double (nullable = true)
## |-- features: vector (nullable = true)
Just for completeness, here is the Scala equivalent:
import org.apache.spark.sql.Row
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, StructType}
// In Spark 1.x the vector classes lived in the mllib package:
// import org.apache.spark.mllib.linalg.{Vectors, VectorUDT}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.linalg.SQLDataTypes.VectorType

// Schema: a double "label" column plus a vector "features" column.
// VectorType is the SQL data type backing ml vectors.
val schema = new StructType()
  .add("label", DoubleType)
  // In Spark 1.x: .add("features", new VectorUDT())
  .add("features", VectorType)

val temp_rdd: RDD[Row] = sc.parallelize(Seq(
  Row(0.0, Vectors.sparse(4, Seq((1, 1.0), (3, 5.5)))),
  Row(1.0, Vectors.sparse(4, Seq((0, -1.0), (2, 0.5))))
))

spark.createDataFrame(temp_rdd, schema).printSchema
// root
// |-- label: double (nullable = true)
// |-- features: vector (nullable = true)