
Dropping a nested column from a Spark DataFrame

I have a DataFrame with the schema:

root
 |-- label: string (nullable = true)
 |-- features: struct (nullable = true)
 |    |-- feat1: string (nullable = true)
 |    |-- feat2: string (nullable = true)
 |    |-- feat3: string (nullable = true)

While I can filter the DataFrame using

val data = rawData
  .filter( !(rawData("features.feat1") <=> "100") )

I am unable to drop a column using

val data = rawData
  .drop("features.feat1")

Is there something I'm doing wrong here? I also tried (unsuccessfully) doing drop(rawData("features.feat1")), though it does not make much sense.

Thanks in advance,

Nikhil

22
Nikhil J Joshi

It is just a programming exercise, but you can try something like this:

import org.apache.spark.sql.{DataFrame, Column}
import org.apache.spark.sql.types.{StructType, StructField}
import org.apache.spark.sql.{functions => f}
import scala.util.Try

case class DFWithDropFrom(df: DataFrame) {
  def getSourceField(source: String): Try[StructField] = {
    Try(df.schema.fields.filter(_.name == source).head)
  }

  def getType(sourceField: StructField): Try[StructType] = {
    Try(sourceField.dataType.asInstanceOf[StructType])
  }

  def genOutputCol(names: Array[String], source: String): Column = {
    f.struct(names.map(x => f.col(source).getItem(x).alias(x)): _*)
  }

  def dropFrom(source: String, toDrop: Array[String]): DataFrame = {
    getSourceField(source)
      .flatMap(getType)
      .map(_.fieldNames.diff(toDrop))
      .map(genOutputCol(_, source))
      .map(df.withColumn(source, _))
      .getOrElse(df)
  }
}

Example usage:

scala> case class features(feat1: String, feat2: String, feat3: String)
defined class features

scala> case class record(label: String, features: features)
defined class record

scala> val df = sc.parallelize(Seq(record("a_label",  features("f1", "f2", "f3")))).toDF
df: org.apache.spark.sql.DataFrame = [label: string, features: struct<feat1:string,feat2:string,feat3:string>]

scala> DFWithDropFrom(df).dropFrom("features", Array("feat1")).show
+-------+--------+
|  label|features|
+-------+--------+
|a_label| [f2,f3]|
+-------+--------+


scala> DFWithDropFrom(df).dropFrom("foobar", Array("feat1")).show
+-------+----------+
|  label|  features|
+-------+----------+
|a_label|[f1,f2,f3]|
+-------+----------+


scala> DFWithDropFrom(df).dropFrom("features", Array("foobar")).show
+-------+----------+
|  label|  features|
+-------+----------+
|a_label|[f1,f2,f3]|
+-------+----------+

Add an implicit conversion and you're good to go.
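
For reference, a minimal sketch of such an implicit conversion (hypothetical names, not part of the original answer):

import scala.language.implicitConversions
import org.apache.spark.sql.DataFrame

// Makes dropFrom available directly on any DataFrame once imported into scope
object DFWithDropFromSyntax {
  implicit def toDFWithDropFrom(df: DataFrame): DFWithDropFrom = DFWithDropFrom(df)
}

// After `import DFWithDropFromSyntax._` you can simply write:
// df.dropFrom("features", Array("feat1")).show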

25
zero323

This version lets you remove nested columns at any level:

import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{StructType, DataType}

/**
  * Various Spark utilities and extensions of DataFrame
  */
object DataFrameUtils {

  private def dropSubColumn(col: Column, colType: DataType, fullColName: String, dropColName: String): Option[Column] = {
    if (fullColName.equals(dropColName)) {
      None
    } else {
      colType match {
        case colType: StructType =>
          if (dropColName.startsWith(s"${fullColName}.")) {
            Some(struct(
              colType.fields
                .flatMap(f =>
                  dropSubColumn(col.getField(f.name), f.dataType, s"${fullColName}.${f.name}", dropColName) match {
                    case Some(x) => Some(x.alias(f.name))
                    case None => None
                  })
                : _*))
          } else {
            Some(col)
          }
        case other => Some(col)
      }
    }
  }

  protected def dropColumn(df: DataFrame, colName: String): DataFrame = {
    df.schema.fields
      .flatMap(f => {
        if (colName.startsWith(s"${f.name}.")) {
          dropSubColumn(col(f.name), f.dataType, f.name, colName) match {
            case Some(x) => Some((f.name, x))
            case None => None
          }
        } else {
          None
        }
      })
      .foldLeft(df.drop(colName)) {
        case (df, (colName, column)) => df.withColumn(colName, column)
      }
  }

  /**
    * Extended version of DataFrame that allows to operate on nested fields
    */
  implicit class ExtendedDataFrame(df: DataFrame) extends Serializable {
    /**
      * Drops nested field from DataFrame
      *
      * @param colName Dot-separated nested field name
      */
    def dropNestedColumn(colName: String): DataFrame = {
      DataFrameUtils.dropColumn(df, colName)
    }
  }
}

Usage:

import DataFrameUtils._
df.dropNestedColumn("a.b.c.d")
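
Applied to the schema from the question, this looks roughly as follows (a sketch; the rebuilt struct comes back as non-nullable because withColumn replaces it with a freshly constructed struct):

import DataFrameUtils._

val data = rawData.dropNestedColumn("features.feat1")
data.printSchema

// root
//  |-- label: string (nullable = true)
//  |-- features: struct (nullable = false)
//  |    |-- feat2: string (nullable = true)
//  |    |-- feat3: string (nullable = true)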
16
Michael Spector

Expanding on spektom's answer, with support for array types:

import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{ArrayType, DataType, StructType}

object DataFrameUtils {

  private def dropSubColumn(col: Column, colType: DataType, fullColName: String, dropColName: String): Option[Column] = {
    if (fullColName.equals(dropColName)) {
      None
    } else if (dropColName.startsWith(s"$fullColName.")) {
      colType match {
        case colType: StructType =>
          Some(struct(
            colType.fields
              .flatMap(f =>
                dropSubColumn(col.getField(f.name), f.dataType, s"$fullColName.${f.name}", dropColName) match {
                  case Some(x) => Some(x.alias(f.name))
                  case None => None
                })
              : _*))
        case colType: ArrayType =>
          colType.elementType match {
            case innerType: StructType =>
              Some(struct(innerType.fields
                .flatMap(f =>
                  dropSubColumn(col.getField(f.name), f.dataType, s"$fullColName.${f.name}", dropColName) match {
                    case Some(x) => Some(x.alias(f.name))
                    case None => None
                  })
                : _*))
            case _ => Some(col) // leave arrays of non-struct elements untouched
          }

        case other => Some(col)
      }
    } else {
      Some(col)
    }
  }

  protected def dropColumn(df: DataFrame, colName: String): DataFrame = {
    df.schema.fields
      .flatMap(f => {
        if (colName.startsWith(s"${f.name}.")) {
          dropSubColumn(col(f.name), f.dataType, f.name, colName) match {
            case Some(x) => Some((f.name, x))
            case None => None
          }
        } else {
          None
        }
      })
      .foldLeft(df.drop(colName)) {
        case (df, (colName, column)) => df.withColumn(colName, column)
      }
  }

  /**
    * Extended version of DataFrame that allows to operate on nested fields
    */
  implicit class ExtendedDataFrame(df: DataFrame) extends Serializable {
    /**
      * Drops nested field from DataFrame
      *
      * @param colName Dot-separated nested field name
      */
    def dropNestedColumn(colName: String): DataFrame = {
      DataFrameUtils.dropColumn(df, colName)
    }
  }

}
4

Following spektom's Scala code snippet, I created similar code in Java. Since Java 8 doesn't have foldLeft, I used forEachOrdered instead. This code is suitable for Spark 2.x (I'm using 2.1). Also, I noted that dropping a column and adding it back with withColumn under the same name doesn't work, so I simply replace the column in place, and it seems to work.

The code is not fully tested; I hope it works :-)

import java.util.Arrays;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Stream;

import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.struct;

public class DataFrameUtils {

public static Dataset<Row> dropNestedColumn(Dataset<Row> dataFrame, String columnName) {
    final DataFrameFolder dataFrameFolder = new DataFrameFolder(dataFrame);
    Arrays.stream(dataFrame.schema().fields())
        .flatMap( f -> {
           if (columnName.startsWith(f.name() + ".")) {
               final Optional<Column> column = dropSubColumn(col(f.name()), f.dataType(), f.name(), columnName);
               if (column.isPresent()) {
                   return Stream.of(new Tuple2<>(f.name(), column));
               } else {
                   return Stream.empty();
               }
           } else {
               return Stream.empty();
           }
        }).forEachOrdered(colTuple -> dataFrameFolder.accept(colTuple));

    return dataFrameFolder.getDF();
}

private static Optional<Column> dropSubColumn(Column col, DataType colType, String fullColumnName, String dropColumnName) {
    Optional<Column> column = Optional.empty();
    if (!fullColumnName.equals(dropColumnName)) {
        if (colType instanceof StructType) {
            if (dropColumnName.startsWith(fullColumnName + ".")) {
                column = Optional.of(struct(getColumns(col, (StructType)colType, fullColumnName, dropColumnName)));
            }
        } else {
            column = Optional.of(col);
        }
    }

    return column;
}

private static Column[] getColumns(Column col, StructType colType, String fullColumnName, String dropColumnName) {
    return Arrays.stream(colType.fields())
        .flatMap(f -> {
                    final Optional<Column> column = dropSubColumn(col.getField(f.name()), f.dataType(),
                            fullColumnName + "." + f.name(), dropColumnName);
                    if (column.isPresent()) {
                        return Stream.of(column.get().alias(f.name()));
                    } else {
                        return Stream.empty();
                    }
                }
        ).toArray(Column[]::new);

}

private static class DataFrameFolder implements Consumer<Tuple2<String, Optional<Column>>> {
    private Dataset<Row> df;

    public DataFrameFolder(Dataset<Row> df) {
        this.df = df;
    }

    public Dataset<Row> getDF() {
        return df;
    }

    @Override
    public void accept(Tuple2<String, Optional<Column>> colTuple) {
        if (!colTuple._2().isPresent()) {
            df = df.drop(colTuple._1());
        } else {
            df = df.withColumn(colTuple._1(), colTuple._2().get());
        }
    }
}
}

Usage example:

private class Pojo {
    private String str;
    private Integer number;
    private List<String> strList;
    private Pojo2 pojo2;

    public String getStr() {
        return str;
    }

    public Integer getNumber() {
        return number;
    }

    public List<String> getStrList() {
        return strList;
    }

    public Pojo2 getPojo2() {
        return pojo2;
    }

}

private class Pojo2 {
    private String str;
    private Integer number;
    private List<String> strList;

    public String getStr() {
        return str;
    }

    public Integer getNumber() {
        return number;
    }

    public List<String> getStrList() {
        return strList;
    }

}

SQLContext context = new SQLContext(new SparkContext("local[1]", "test"));
Dataset<Row> df = context.createDataFrame(Collections.emptyList(), Pojo.class);
Dataset<Row> dfRes = DataFrameUtils.dropNestedColumn(df, "pojo2.str");

Original structure:

root
 |-- number: integer (nullable = true)
 |-- pojo2: struct (nullable = true)
 |    |-- number: integer (nullable = true)
 |    |-- str: string (nullable = true)
 |    |-- strList: array (nullable = true)
 |    |    |-- element: string (containsNull = true)
 |-- str: string (nullable = true)
 |-- strList: array (nullable = true)
 |    |-- element: string (containsNull = true)

After the drop:

root
 |-- number: integer (nullable = true)
 |-- pojo2: struct (nullable = false)
 |    |-- number: integer (nullable = true)
 |    |-- strList: array (nullable = true)
 |    |    |-- element: string (containsNull = true)
 |-- str: string (nullable = true)
 |-- strList: array (nullable = true)
 |    |-- element: string (containsNull = true)
2
Lior Chaga

Another (PySpark) way is to drop the features.feat1 column by creating features again:

from pyspark.sql.functions import col, arrays_zip

display(df
        .withColumn("features", arrays_Zip("features.feat2", "features.feat3"))
        .withColumn("features", col("features").cast(schema))
)

Here, schema is the new schema (excluding features.feat1):

from pyspark.sql.types import StructType, StructField, StringType

schema = StructType(
    [
      StructField('feat2', StringType(), True), 
      StructField('feat3', StringType(), True), 
    ]
  )
1
kiae

The Make Structs Easy* library makes it easy to perform operations like adding, dropping, and renaming fields within nested data structures. The library is available in both Scala and Python.

Suppose you have the following data:

import org.apache.spark.sql.functions._

case class Features(feat1: String, feat2: String, feat3: String)
case class Record(features: Features, arrayOfFeatures: Seq[Features])

val df = Seq(
   Record(Features("hello", "world", "!"), Seq(Features("red", "orange", "yellow"), Features("green", "blue", "indigo")))
).toDF

df.printSchema

// root
//  |-- features: struct (nullable = true)
//  |    |-- feat1: string (nullable = true)
//  |    |-- feat2: string (nullable = true)
//  |    |-- feat3: string (nullable = true)
//  |-- arrayOfFeatures: array (nullable = true)
//  |    |-- element: struct (containsNull = true)
//  |    |    |-- feat1: string (nullable = true)
//  |    |    |-- feat2: string (nullable = true)
//  |    |    |-- feat3: string (nullable = true)

df.show(false)

// +-----------------+----------------------------------------------+
// |features         |arrayOfFeatures                               |
// +-----------------+----------------------------------------------+
// |[hello, world, !]|[[red, orange, yellow], [green, blue, indigo]]|
// +-----------------+----------------------------------------------+

Dropping feat2 from features is then as simple as:

import com.github.fqaiser94.mse.methods._

// drop feat2 from features
df.withColumn("features", $"features".dropFields("feat2")).show(false)

// +----------+----------------------------------------------+
// |features  |arrayOfFeatures                               |
// +----------+----------------------------------------------+
// |[hello, !]|[[red, orange, yellow], [green, blue, indigo]]|
// +----------+----------------------------------------------+

I noticed there are lots of follow-up comments on the other solutions asking whether there is a way to drop a column nested inside a struct that is itself nested inside an array. This can be done by combining the functions provided by the Make Structs Easy library with those provided by the spark-hofs library, as follows:

import za.co.absa.spark.hofs._

// drop feat2 in each element of arrayOfFeatures
df.withColumn("arrayOfFeatures", transform($"arrayOfFeatures", features => features.dropFields("feat2"))).show(false)

// +-----------------+--------------------------------+
// |features         |arrayOfFeatures                 |
// +-----------------+--------------------------------+
// |[hello, world, !]|[[red, yellow], [green, indigo]]|
// +-----------------+--------------------------------+

*Full disclosure: I am the author of the Make Structs Easy library referenced in this answer.
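
Worth noting: since Spark 3.1.0, equivalent functionality is built into Spark itself as Column.dropFields, so on recent versions no external library is needed:

// Spark 3.1.0+ only: native dropFields on Column
df.withColumn("features", $"features".dropFields("feat2")).show(false)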

0
fqaiser94

Adding a Java version of the solution for this.

A utility class (pass your Dataset and the nested column that needs to be dropped to the dropNestedColumn function).

(Lior Chaga's answer had a few bugs; I fixed them when I tried to use his answer.)

import java.util.Arrays;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Stream;

import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;

public class NestedColumnActions {

private static final String DOT = ".";

/*
dataset : dataset in which we want to drop columns
columnName : nested column that needs to be deleted
*/
public static Dataset<?> dropNestedColumn(Dataset<?> dataset, String columnName) {

    //Special case of top level column deletion
    if(!columnName.contains("."))
        return dataset.drop(columnName);

    final DataSetModifier dataFrameFolder = new DataSetModifier(dataset);
    Arrays.stream(dataset.schema().fields())
            .flatMap(f -> {
                //If the column name to be deleted starts with current top level column
                if (columnName.startsWith(f.name() + DOT)) {
                    //Get new column structure under f , expected after deleting the required column
                    final Optional<Column> column = dropSubColumn(functions.col(f.name()), f.dataType(), f.name(), columnName);
                    if (column.isPresent()) {
                        return Stream.of(new Tuple2<>(f.name(), column));
                    } else {
                        return Stream.empty();
                    }
                } else {
                    return Stream.empty();
                }
            })
            //Call accept function with Tuples of (top level column name, new column structure under it)
            .forEach(colTuple -> dataFrameFolder.accept(colTuple));

    return dataFrameFolder.getDataset();
}

private static Optional<Column> dropSubColumn(Column col, DataType colType, String fullColumnName, String dropColumnName) {
    Optional<Column> column = Optional.empty();
    if (!fullColumnName.equals(dropColumnName)) {
        if (colType instanceof StructType) {
            if (dropColumnName.startsWith(fullColumnName + DOT)) {
                column = Optional.of(functions.struct(getColumns(col, (StructType) colType, fullColumnName, dropColumnName)));
            }
            else {
                column = Optional.of(col);
            }
        } else {
            column = Optional.of(col);
        }
    }

    return column;
}

private static Column[] getColumns(Column col, StructType colType, String fullColumnName, String dropColumnName) {
    return Arrays.stream(colType.fields())
            .flatMap(f -> {
                        final Optional<Column> column = dropSubColumn(col.getField(f.name()), f.dataType(),
                                fullColumnName + "." + f.name(), dropColumnName);
                        if (column.isPresent()) {
                            return Stream.of(column.get().alias(f.name()));
                        } else {
                            return Stream.empty();
                        }
                    }
            ).toArray(Column[]::new);

}

private static class DataSetModifier implements Consumer<Tuple2<String, Optional<Column>>> {
    private Dataset<?> df;

    public DataSetModifier(Dataset<?> df) {
        this.df = df;
    }

    public Dataset<?> getDataset() {
        return df;
    }

    /*
    colTuple[0]:top level column name
    colTuple[1]:new column structure under it
   */
    @Override
    public void accept(Tuple2<String, Optional<Column>> colTuple) {
        if (!colTuple._2().isPresent()) {
            df = df.drop(colTuple._1());
        } else {
            df = df.withColumn(colTuple._1(), colTuple._2().get());
        }
    }
}

}

0

PySpark implementation:

from typing import List

import pyspark.sql.functions as sf
from pyspark.sql import Column, DataFrame
from pyspark.sql.types import StructType

def _drop_nested_field(
    schema: StructType,
    field_to_drop: str,
    parents: List[str] = None,
) -> Column:
    parents = list() if parents is None else parents
    src_col = lambda field_names: sf.col('.'.join(f'`{c}`' for c in field_names))

    if '.' in field_to_drop:
        root, subfield = field_to_drop.split('.', maxsplit=1)
        field_to_drop_from = next(f for f in schema.fields if f.name == root)

        return sf.struct(
            *[src_col(parents + [f.name]) for f in schema.fields if f.name != root],
            _drop_nested_field(
                schema=field_to_drop_from.dataType,
                field_to_drop=subfield,
                parents=parents + [root]
            ).alias(root)
        )

    else:
        # select all columns except the one to drop
        return sf.struct(
            *[src_col(parents + [f.name]) for f in schema.fields if f.name != field_to_drop],
        )


def drop_nested_field(
    df: DataFrame,
    field_to_drop: str,
) -> DataFrame:
    if '.' in field_to_drop:
        root, subfield = field_to_drop.split('.', maxsplit=1)
        field_to_drop_from = next(f for f in df.schema.fields if f.name == root)

        return df.withColumn(root, _drop_nested_field(
            schema=field_to_drop_from.dataType,
            field_to_drop=subfield,
            parents=[root]
        ))
    else:
        return df.drop(field_to_drop)


df = drop_nested_field(df, 'a.b.c.d')
0
M.Vanderlee