@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException
 import java.util.Date
 
 /**
- * This trait is used in [[GlutenColumnarWriteFilesRDD]] to inject the staging write path before
+ * This trait is used in [[ColumnarWriteFilesRDD]] to inject the staging write path before
  * initializing the native plan and collect native write files metrics for each backend.
  */
 trait BackendWrite {
@@ -51,7 +51,7 @@ trait BackendWrite {
  * This RDD is used to make sure we have injected staging write path before initializing the native
  * plan, and support Spark file commit protocol.
  */
-class GlutenColumnarWriteFilesRDD(
+class ColumnarWriteFilesRDD(
     var prev: RDD[ColumnarBatch],
     description: WriteJobDescription,
     committer: FileCommitProtocol,
@@ -156,7 +156,7 @@ class GlutenColumnarWriteFilesRDD(
 // we need to expose a dummy child (as right child) with type "WriteFilesExec" to let Spark
 // choose the new write code path (version >= 3.4). The actual plan to write is the left child
 // of this operator.
-case class GlutenColumnarWriteFilesExec private (
+case class ColumnarWriteFilesExec private (
     override val left: SparkPlan,
     override val right: SparkPlan,
     fileFormat: FileFormat,
@@ -166,7 +166,7 @@ case class GlutenColumnarWriteFilesExec private (
     staticPartitions: TablePartitionSpec)
   extends BinaryExecNode
   with GlutenPlan
-  with GlutenColumnarWriteFilesExec.ExecuteWriteCompatible {
+  with ColumnarWriteFilesExec.ExecuteWriteCompatible {
 
   val child: SparkPlan = left
 
@@ -217,7 +217,7 @@ case class GlutenColumnarWriteFilesExec private (
       // partition rdd to make sure we at least set up one write task to write the metadata.
       writeFilesForEmptyRDD(description, committer, jobTrackerID)
     } else {
-      new GlutenColumnarWriteFilesRDD(rdd, description, committer, jobTrackerID)
+      new ColumnarWriteFilesRDD(rdd, description, committer, jobTrackerID)
     }
   }
   override protected def withNewChildrenInternal(
@@ -226,15 +226,15 @@ case class GlutenColumnarWriteFilesExec private (
     copy(newLeft, newRight, fileFormat, partitionColumns, bucketSpec, options, staticPartitions)
 }
 
-object GlutenColumnarWriteFilesExec {
+object ColumnarWriteFilesExec {
 
   def apply(
       child: SparkPlan,
       fileFormat: FileFormat,
       partitionColumns: Seq[Attribute],
       bucketSpec: Option[BucketSpec],
       options: Map[String, String],
-      staticPartitions: TablePartitionSpec): GlutenColumnarWriteFilesExec = {
+      staticPartitions: TablePartitionSpec): ColumnarWriteFilesExec = {
     // This is a workaround for FileFormatWriter#write. Vanilla Spark (version >= 3.4) requires for
     // a plan that has at least one node exactly of type `WriteFilesExec` that is a Scala
     // case-class, to decide to choose new `#executeWrite` code path over the legacy `#execute`
@@ -253,7 +253,7 @@ object GlutenColumnarWriteFilesExec {
       options,
       staticPartitions)
 
-    GlutenColumnarWriteFilesExec(
+    ColumnarWriteFilesExec(
       child,
       right,
       fileFormat,