
fix: Fallback to Spark when PARQUET_FIELD_ID_READ_ENABLED=true for new native scans #1757

Closed
wants to merge 3 commits

68 changes: 38 additions & 30 deletions spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
@@ -41,36 +41,44 @@ import org.apache.comet.parquet.{CometParquetScan, SupportsComet}
* Spark physical optimizer rule for replacing Spark scans with Comet scans.
*/
case class CometScanRule(session: SparkSession) extends Rule[SparkPlan] {

override def apply(plan: SparkPlan): SparkPlan = {
if (!isCometLoaded(conf) || !isCometScanEnabled(conf)) {
if (!isCometLoaded(conf)) {
withInfo(plan, "Comet is not enabled")
} else if (!isCometScanEnabled(conf)) {
withInfo(plan, "Comet Scan is not enabled")
}
plan
} else {

def hasMetadataCol(plan: SparkPlan): Boolean = {
plan.expressions.exists(_.exists {
case a: Attribute =>
a.isMetadataCol
case _ => false
})
}

plan.transform {
case scan if hasMetadataCol(scan) =>
withInfo(scan, "Metadata column is not supported")

// data source V1
case scanExec: FileSourceScanExec =>
transformV1Scan(scanExec)

// data source V2
case scanExec: BatchScanExec =>
transformV2Scan(scanExec)
}
if (!isCometLoaded(conf)) {
withInfo(plan, "Comet is not enabled")
return plan
}

if (!isCometScanEnabled(conf)) {
withInfo(plan, "Comet Scan is not enabled")
return plan
}

val scanImpl: String = COMET_NATIVE_SCAN_IMPL.get()
if (SQLConf.get.getConf(
SQLConf.PARQUET_FIELD_ID_READ_ENABLED) && scanImpl != CometConf.SCAN_NATIVE_COMET) {
withInfo(plan, s"Comet $scanImpl scan does not support PARQUET_FIELD_ID_READ_ENABLED")
Review comment (Contributor), suggested change to the message above:
withInfo(plan, s"Comet $scanImpl scan does not support reading with `spark.sql.parquet.fieldId.read.enabled` enabled")
return plan
}
Review comment (Member, Author) on lines +57 to +61: This is the change. The rest is refactoring.


def hasMetadataCol(plan: SparkPlan): Boolean = {
plan.expressions.exists(_.exists {
case a: Attribute =>
a.isMetadataCol
case _ => false
})
}

plan.transform {
case scan if hasMetadataCol(scan) =>
withInfo(scan, "Metadata column is not supported")

// data source V1
case scanExec: FileSourceScanExec =>
transformV1Scan(scanExec)

// data source V2
case scanExec: BatchScanExec =>
transformV2Scan(scanExec)
}
}
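
For reviewers who want to reproduce the new fallback (lines +57 to +61 above), here is a minimal sketch of the triggering configuration. `spark.sql.parquet.fieldId.read.enabled` is Spark's key for PARQUET_FIELD_ID_READ_ENABLED; the plugin class `org.apache.comet.CometPlugin`, the keys `spark.comet.enabled` and `spark.comet.scan.impl`, the value `native_datafusion`, and the input path are assumptions for illustration, not taken from this PR.

import org.apache.spark.sql.SparkSession

// Sketch only: requires the Comet jar on the classpath; keys marked "assumed" are not from this diff.
val spark = SparkSession.builder()
  .master("local[*]")
  .config("spark.plugins", "org.apache.comet.CometPlugin")   // assumed plugin class
  .config("spark.comet.enabled", "true")                     // assumed Comet toggle
  .config("spark.comet.scan.impl", "native_datafusion")      // assumed key for COMET_NATIVE_SCAN_IMPL
  .config("spark.sql.parquet.fieldId.read.enabled", "true")  // PARQUET_FIELD_ID_READ_ENABLED
  .getOrCreate()

// With field-ID reads enabled and a scan impl other than native_comet, CometScanRule
// should now leave the Spark scan in place and record the fallback reason.
val df = spark.read.parquet("/tmp/parquet-with-field-ids")   // hypothetical input path
df.explain()  // expect a plain FileSourceScanExec / BatchScanExec rather than a Comet scan
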

@@ -92,7 +100,7 @@ case class CometScanRule(session: SparkSession) extends Rule[SparkPlan] {
return withInfos(scanExec, fallbackReasons.toSet)
}

val scanImpl = COMET_NATIVE_SCAN_IMPL.get()
val scanImpl: String = COMET_NATIVE_SCAN_IMPL.get()
if (scanImpl == CometConf.SCAN_NATIVE_DATAFUSION && !COMET_EXEC_ENABLED.get()) {
fallbackReasons +=
s"Full native scan disabled because ${COMET_EXEC_ENABLED.key} disabled"