Computing rank of a row

醉梦人生 · 2021-02-11 03:11

I want to rank user ids based on one field. For rows with the same value of that field, the rank should be the same. The data is in a Hive table.

e.g.

user    value
a       5
b       10
c       5
d       6


        
1 Answer

臣服心动 · 2021-02-11 03:41

    It is possible to use the rank window function, either with the DataFrame API:

    import org.apache.spark.sql.functions.rank
    import org.apache.spark.sql.expressions.Window
    import sqlContext.implicits._  // for $ and toDF; pre-imported in spark-shell

    // No partitionBy, so the window spans (and globally orders) the whole dataset.
    val w = Window.orderBy($"value")

    val df = sc.parallelize(Seq(
      ("a", 5), ("b", 10), ("c", 5), ("d", 6)
    )).toDF("user", "value")

    df.select($"user", rank.over(w).alias("rank")).show
    
    // +----+----+
    // |user|rank|
    // +----+----+
    // |   a|   1|
    // |   c|   1|
    // |   d|   3|
    // |   b|   4|
    // +----+----+
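
    As a side note, rank leaves gaps after ties (1, 1, 3, 4 above). If consecutive ranks are required instead, dense_rank from org.apache.spark.sql.functions is a drop-in replacement; a small sketch, not part of the original question:

    import org.apache.spark.sql.functions.dense_rank

    df.select($"user", dense_rank.over(w).alias("rank")).show

    // +----+----+
    // |user|rank|
    // +----+----+
    // |   a|   1|
    // |   c|   1|
    // |   d|   2|
    // |   b|   3|
    // +----+----+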
    

    or with raw SQL:

    df.registerTempTable("df")  // Spark 1.x API; use createOrReplaceTempView on 2.x+
    sqlContext.sql("SELECT user, RANK() OVER (ORDER BY value) AS rank FROM df").show
    
    // +----+----+
    // |user|rank|
    // +----+----+
    // |   a|   1|
    // |   c|   1|
    // |   d|   3|
    // |   b|   4|
    // +----+----+
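
    Since the question mentions the data already lives in a Hive table, the same query can run directly against that table through a HiveContext; "my_table" below is a hypothetical table name:

    // Assumes sqlContext is a HiveContext; replace "my_table" with the real table.
    sqlContext.sql(
      "SELECT user, RANK() OVER (ORDER BY value) AS rank FROM my_table").show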
    

    but either way it is extremely inefficient: since the window is defined without a PARTITION BY clause, Spark has to move all the data to a single partition to compute the global ranks.

    You can also use the RDD API, but it is not exactly straightforward. First, let's convert the DataFrame to an RDD:

    import org.apache.spark.sql.Row
    import org.apache.spark.rdd.RDD
    import org.apache.spark.RangePartitioner

    // On Spark 1.x DataFrame.map returns an RDD; on 2.x+ use df.rdd.map instead.
    val rdd: RDD[(Int, String)] = df.select($"value", $"user")
      .map{ case Row(value: Int, user: String) => (value, user) }

    // Range partitioning keeps partitions globally ordered: every key in
    // partition i sorts before every key in partition i + 1.
    val partitioner = new RangePartitioner(rdd.partitions.size, rdd)
    val sorted = rdd.repartitionAndSortWithinPartitions(partitioner)
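
    As a sanity check (not needed for the algorithm itself), you can peek at how the range partitioner spread the sorted data; the exact split depends on the number of partitions:

    // Dump each partition's contents -- small example data only.
    sorted.glom.collect.zipWithIndex.foreach { case (part, i) =>
      println(s"partition $i: ${part.mkString(", ")}")
    }
    // e.g. partition 0: (5,a), (5,c)   partition 1: (6,d), (10,b)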
    

    Next we have to compute ranks per partition:

    def rank(iter: Iterator[(Int, String)]) = {
      // Accumulator entries: (rank, value, label, offset). The seed row is a
      // sentinel that is dropped at the end; ranks produced here are zero-based.
      val zero = List((-1L, Integer.MIN_VALUE, "", 1L))

      def f(acc: List[(Long, Int, String, Long)], x: (Int, String)) =
        (acc.head, x) match {
          case (
              (prevRank: Long, prevValue: Int, _, offset: Long),
              (currValue: Int, label: String)) =>
            // Ties keep the previous rank; a new value jumps ahead by the
            // number of tied rows seen so far (rank with gaps, like SQL RANK()).
            val newRank = if (prevValue == currValue) prevRank else prevRank + offset
            val newOffset = if (prevValue == currValue) offset + 1L else 1L
            (newRank, currValue, label, newOffset) :: acc
        }

      iter.foldLeft(zero)(f).reverse.drop(1).map { case (rank, _, label, _) =>
        (rank, label)
      }.toIterator
    }
    
    
    val partRanks = sorted.mapPartitions(rank)
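
    At this point the ranks are only correct within each partition, and they are zero-based. For a split like the one sketched above this would give:

    partRanks.glom.collect.foreach(part => println(part.mkString(", ")))
    // e.g. (0,a), (0,c) in one partition and (0,d), (1,b) in the other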
    

    Then we need the cumulative offset for each partition:

    def getOffsets(sorted: RDD[(Int, String)]) = sorted
      .mapPartitionsWithIndex((i: Int, iter: Iterator[(Int, String)]) =>
        Iterator((i, iter.size)))
      .collect
      .foldLeft(List((-1, 0)))((acc: List[(Int, Int)], x: (Int, Int)) =>
        (x._1, x._2 + acc.head._2) :: acc)
      .toMap

    // offsets(i) is the total number of records in partitions 0..i;
    // the (-1, 0) entry is the base case used by the first partition.
    val offsets = sc.broadcast(getOffsets(sorted))
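
    For the example split above, the broadcast value would come out as cumulative record counts keyed by partition index; the exact numbers depend on the partitioning:

    println(offsets.value)  // e.g. Map(-1 -> 0, 0 -> 2, 1 -> 4)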
    

    and the final ranks:

    def adjust(i: Int, iter: Iterator[(Long, String)]) =
      // Shift each partition's zero-based ranks by the number of records
      // in all preceding partitions.
      iter.map{case (rank, label) => (rank + offsets.value(i - 1).toLong, label)}

    val ranks = partRanks
      .mapPartitionsWithIndex(adjust)
      .map{case (rank, label) => (1 + rank, label)}  // make ranks one-based
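
    Collecting the result reproduces the window function output (the relative order of ties is not guaranteed):

    ranks.collect.foreach(println)
    // (1,a)
    // (1,c)
    // (3,d)
    // (4,b)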
    
