Commit cd7cdd0

Rename GlutenColumnarWriteFilesExec to ColumnarWriteFilesExec and GlutenColumnarWriteFilesRDD to ColumnarWriteFilesRDD
1 parent 197ddcd commit cd7cdd0

6 files changed, +23 −23 lines changed
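
For code outside this commit that still references the old names, the change is a mechanical import and type rename. A minimal illustration (not part of this diff; usesNativeWrite is a hypothetical helper):

// Before the rename:
//   import org.apache.spark.sql.execution.GlutenColumnarWriteFilesExec
//   plan.find(_.isInstanceOf[GlutenColumnarWriteFilesExec]).isDefined
// After the rename:
import org.apache.spark.sql.execution.{ColumnarWriteFilesExec, SparkPlan}

def usesNativeWrite(plan: SparkPlan): Boolean =
  plan.find(_.isInstanceOf[ColumnarWriteFilesExec]).isDefined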

backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala

+1 −1

@@ -86,7 +86,7 @@ class VeloxParquetWriteForHiveSuite extends GlutenQueryTest with SQLTestUtils {
     override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
       if (!nativeUsed) {
         nativeUsed = if (isSparkVersionGE("3.4")) {
-          qe.executedPlan.find(_.isInstanceOf[GlutenColumnarWriteFilesExec]).isDefined
+          qe.executedPlan.find(_.isInstanceOf[ColumnarWriteFilesExec]).isDefined
         } else {
           qe.executedPlan.find(_.isInstanceOf[FakeRowAdaptor]).isDefined
         }
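
The check above runs inside a QueryExecutionListener; a standalone sketch of the same probe (hypothetical listener name, simplified to the Spark >= 3.4 branch only) could look like this:

import org.apache.spark.sql.execution.{ColumnarWriteFilesExec, QueryExecution}
import org.apache.spark.sql.util.QueryExecutionListener

// Hypothetical listener (not part of this commit): records whether the query that
// just finished used Gluten's columnar write node anywhere in its executed plan.
class NativeWriteProbe extends QueryExecutionListener {
  @volatile var nativeUsed: Boolean = false

  override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = {
    // Same probe as the suite: search the executed plan for the renamed operator.
    if (!nativeUsed) {
      nativeUsed = qe.executedPlan.find(_.isInstanceOf[ColumnarWriteFilesExec]).isDefined
    }
  }

  override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = ()
}

Such a listener would be registered via spark.listenerManager.register(new NativeWriteProbe) before the write query runs.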

gluten-core/src/main/scala/org/apache/gluten/backendsapi/SparkPlanExecApi.scala

+2 −2

@@ -39,7 +39,7 @@ import org.apache.spark.sql.catalyst.plans.JoinType
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, Partitioning}
 import org.apache.spark.sql.catalyst.rules.Rule
-import org.apache.spark.sql.execution.{BackendWrite, FileSourceScanExec, GenerateExec, GlutenColumnarWriteFilesExec, LeafExecNode, SparkPlan}
+import org.apache.spark.sql.execution.{BackendWrite, ColumnarWriteFilesExec, FileSourceScanExec, GenerateExec, LeafExecNode, SparkPlan}
 import org.apache.spark.sql.execution.datasources.{FileFormat, WriteJobDescription}
 import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, FileScan}
 import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
@@ -389,7 +389,7 @@ trait SparkPlanExecApi {
      bucketSpec: Option[BucketSpec],
      options: Map[String, String],
      staticPartitions: TablePartitionSpec): SparkPlan = {
-    GlutenColumnarWriteFilesExec(
+    ColumnarWriteFilesExec(
      child,
      fileFormat,
      partitionColumns,

gluten-core/src/main/scala/org/apache/spark/sql/execution/GlutenColumnarWriteFilesExec.scala → gluten-core/src/main/scala/org/apache/spark/sql/execution/ColumnarWriteFilesExec.scala (renamed)

+8 −8

@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException
 import java.util.Date

 /**
- * This trait is used in [[GlutenColumnarWriteFilesRDD]] to inject the staging write path before
+ * This trait is used in [[ColumnarWriteFilesRDD]] to inject the staging write path before
  * initializing the native plan and collect native write files metrics for each backend.
  */
 trait BackendWrite {
@@ -51,7 +51,7 @@ trait BackendWrite {
  * This RDD is used to make sure we have injected staging write path before initializing the native
  * plan, and support Spark file commit protocol.
  */
-class GlutenColumnarWriteFilesRDD(
+class ColumnarWriteFilesRDD(
    var prev: RDD[ColumnarBatch],
    description: WriteJobDescription,
    committer: FileCommitProtocol,
@@ -156,7 +156,7 @@ class GlutenColumnarWriteFilesRDD(
 // we need to expose a dummy child (as right child) with type "WriteFilesExec" to let Spark
 // choose the new write code path (version >= 3.4). The actual plan to write is the left child
 // of this operator.
-case class GlutenColumnarWriteFilesExec private (
+case class ColumnarWriteFilesExec private (
    override val left: SparkPlan,
    override val right: SparkPlan,
    fileFormat: FileFormat,
@@ -166,7 +166,7 @@ case class GlutenColumnarWriteFilesExec private (
    staticPartitions: TablePartitionSpec)
  extends BinaryExecNode
  with GlutenPlan
- with GlutenColumnarWriteFilesExec.ExecuteWriteCompatible {
+ with ColumnarWriteFilesExec.ExecuteWriteCompatible {

  val child: SparkPlan = left

@@ -217,7 +217,7 @@ case class GlutenColumnarWriteFilesExec private (
      // partition rdd to make sure we at least set up one write task to write the metadata.
      writeFilesForEmptyRDD(description, committer, jobTrackerID)
    } else {
-     new GlutenColumnarWriteFilesRDD(rdd, description, committer, jobTrackerID)
+     new ColumnarWriteFilesRDD(rdd, description, committer, jobTrackerID)
    }
  }
  override protected def withNewChildrenInternal(
@@ -226,15 +226,15 @@ case class GlutenColumnarWriteFilesExec private (
    copy(newLeft, newRight, fileFormat, partitionColumns, bucketSpec, options, staticPartitions)
  }

-object GlutenColumnarWriteFilesExec {
+object ColumnarWriteFilesExec {

  def apply(
      child: SparkPlan,
      fileFormat: FileFormat,
      partitionColumns: Seq[Attribute],
      bucketSpec: Option[BucketSpec],
      options: Map[String, String],
-     staticPartitions: TablePartitionSpec): GlutenColumnarWriteFilesExec = {
+     staticPartitions: TablePartitionSpec): ColumnarWriteFilesExec = {
    // This is a workaround for FileFormatWriter#write. Vanilla Spark (version >= 3.4) requires for
    // a plan that has at least one node exactly of type `WriteFilesExec` that is a Scala
    // case-class, to decide to choose new `#executeWrite` code path over the legacy `#execute`
@@ -253,7 +253,7 @@ object GlutenColumnarWriteFilesExec {
      options,
      staticPartitions)

-   GlutenColumnarWriteFilesExec(
+   ColumnarWriteFilesExec(
      child,
      right,
      fileFormat,
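
As the comments in this file explain, the operator is a BinaryExecNode only so that vanilla Spark (>= 3.4) finds a WriteFilesExec-typed node and takes the #executeWrite path; the left child, exposed as `child`, is the plan that actually writes. A minimal sketch of how calling code can rely on that shape, mirroring the test-suite changes below (the helper name is hypothetical):

import org.apache.spark.sql.execution.{ColumnarWriteFilesExec, SparkPlan}

// Hypothetical helper: given the plan Spark executed for an insert, peel off the
// columnar write node and return the plan that actually produces the written data.
def actualWritePlan(executedPlan: SparkPlan): SparkPlan = executedPlan match {
  // The left child (`child`) is the real columnar plan; the right child is only the
  // dummy WriteFilesExec that makes Spark dispatch to #executeWrite.
  case w: ColumnarWriteFilesExec => w.child
  case other => other
}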

gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/GlutenV1WriteCommandSuite.scala

+5 −5

@@ -21,7 +21,7 @@ import org.apache.gluten.execution.SortExecTransformer
 import org.apache.spark.sql.GlutenSQLTestsBaseTrait
 import org.apache.spark.sql.catalyst.expressions.{Ascending, AttributeReference, NullsFirst, SortOrder}
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Sort}
-import org.apache.spark.sql.execution.{GlutenColumnarWriteFilesExec, QueryExecution, SortExec}
+import org.apache.spark.sql.execution.{ColumnarWriteFilesExec, QueryExecution, SortExec}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.{IntegerType, StringType}
@@ -122,8 +122,8 @@ class GlutenV1WriteCommandSuite
        val executedPlan = FileFormatWriter.executedPlan.get

        val plan = if (enabled) {
-         assert(executedPlan.isInstanceOf[GlutenColumnarWriteFilesExec])
-         executedPlan.asInstanceOf[GlutenColumnarWriteFilesExec].child
+         assert(executedPlan.isInstanceOf[ColumnarWriteFilesExec])
+         executedPlan.asInstanceOf[ColumnarWriteFilesExec].child
        } else {
          executedPlan.transformDown { case a: AdaptiveSparkPlanExec => a.executedPlan }
        }
@@ -204,8 +204,8 @@ class GlutenV1WriteCommandSuite
        val executedPlan = FileFormatWriter.executedPlan.get

        val plan = if (enabled) {
-         assert(executedPlan.isInstanceOf[GlutenColumnarWriteFilesExec])
-         executedPlan.asInstanceOf[GlutenColumnarWriteFilesExec].child
+         assert(executedPlan.isInstanceOf[ColumnarWriteFilesExec])
+         executedPlan.asInstanceOf[ColumnarWriteFilesExec].child
        } else {
          executedPlan.transformDown { case a: AdaptiveSparkPlanExec => a.executedPlan }
        }

gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala

+4 −4

@@ -24,7 +24,7 @@ import org.apache.spark.executor.OutputMetrics
 import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.execution.{CommandResultExec, GlutenColumnarWriteFilesExec, QueryExecution}
+import org.apache.spark.sql.execution.{ColumnarWriteFilesExec, CommandResultExec, QueryExecution}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
 import org.apache.spark.sql.execution.command.DataWritingCommandExec
 import org.apache.spark.sql.execution.metric.SQLMetric
@@ -60,13 +60,13 @@ class GlutenInsertSuite
    super.afterAll()
  }

- private def checkAndGetWriteFiles(df: DataFrame): GlutenColumnarWriteFilesExec = {
+ private def checkAndGetWriteFiles(df: DataFrame): ColumnarWriteFilesExec = {
    val writeFiles = stripAQEPlan(
      df.queryExecution.executedPlan
        .asInstanceOf[CommandResultExec]
        .commandPhysicalPlan).children.head
-   assert(writeFiles.isInstanceOf[GlutenColumnarWriteFilesExec])
-   writeFiles.asInstanceOf[GlutenColumnarWriteFilesExec]
+   assert(writeFiles.isInstanceOf[ColumnarWriteFilesExec])
+   writeFiles.asInstanceOf[ColumnarWriteFilesExec]
  }

  testGluten("insert partition table") {

gluten-ut/spark35/src/test/backends-velox/org/apache/gluten/GlutenColumnarWriteTestSupport.scala

+3 −3

@@ -16,12 +16,12 @@
  */
 package org.apache.gluten

-import org.apache.spark.sql.execution.{SparkPlan, GlutenColumnarWriteFilesExec}
+import org.apache.spark.sql.execution.{SparkPlan, ColumnarWriteFilesExec}

 trait GlutenColumnarWriteTestSupport {

  def checkWriteFilesAndGetChild(sparkPlan: SparkPlan): SparkPlan = {
-   assert(sparkPlan.isInstanceOf[GlutenColumnarWriteFilesExec])
-   sparkPlan.asInstanceOf[GlutenColumnarWriteFilesExec].child
+   assert(sparkPlan.isInstanceOf[ColumnarWriteFilesExec])
+   sparkPlan.asInstanceOf[ColumnarWriteFilesExec].child
  }
 }
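
A hedged usage sketch of this trait (the suite name and test body are hypothetical; FileFormatWriter.executedPlan is the test-only hook that the GlutenV1WriteCommandSuite changes above also read, and access to it may require the suite to live under org.apache.spark.sql, as the Gluten UT suites do):

import org.apache.gluten.GlutenColumnarWriteTestSupport
import org.apache.spark.sql.execution.datasources.FileFormatWriter

// Hypothetical suite: after triggering a write, confirm Gluten substituted its
// columnar write node and fetch the real child plan for further assertions.
class MyColumnarWriteSuite extends GlutenColumnarWriteTestSupport {
  def verifyNativeWrite(): Unit = {
    val executedPlan = FileFormatWriter.executedPlan.get
    val writePlan = checkWriteFilesAndGetChild(executedPlan)
    // e.g. assert that writePlan contains a SortExecTransformer, as GlutenV1WriteCommandSuite does.
  }
}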
