ArrayIndexOutOfBoundsException with Spark 3.0.0 reading an ORC Hive table

gcmastyq · posted 2021-05-27 in Spark

I am trying to read an ORC Hive table with Spark 3.0.0; I read the same table with Spark 1.6 on the same cluster without any problem. I am using Hive 1.2 and downloaded spark-3.0.0-bin-hadoop2.7-hive1.2.tgz from the Apache Spark release archive. The query is:

val free_unit = sql("SELECT * from cdrs.pro_free_unit where ((aniomes= 202003 or month = 202004)) and sub_id=100000000003637081 and free_unit_type_id=5052")
// selects init_balance, brandid, dia plus every column whose name starts with "free_uni"
free_unit.select((Array("init_balance", "brandid", "dia") ++ free_unit.columns.filter(_.startsWith("free_uni"))).map(free_unit(_)) : _*).show(50, false)

The error is:

20/09/10 16:28:07 WARN impl.SchemaEvolution: Column names are missing from this file. This is caused by a writer earlier than HIVE-4243. The reader will reconcile schemas based on index. File type: struct<_col0:bigint,_col1:bigint,_col2:varchar(1),_col3:varchar(1),_col4:varchar(1),_col5:timestamp,_col6:timestamp,_col7:varchar(4),_col8:varchar(64),_col9:varchar(4),_col10:varchar(64),_col11:varchar(128),_col12:varchar(32),_col13:bigint,_col14:varchar(1),_col15:bigint,_col16:varchar(64),_col17:varchar(64),_col18:varchar(4000),_col19:varchar(1),_col20:bigint,_col21:bigint,_col22:bigint,_col23:varchar(1),_col24:bigint,_col25:timestamp,_col26:timestamp,_col27:bigint,_col28:bigint,_col29:varchar(128),_col30:varchar(32),_col31:varchar(64),_col32:varchar(1),_col33:varchar(1),_col34:varchar(1),_col35:bigint,_col36:varchar(1),_col37:bigint,_col38:varchar(1),_col39:timestamp,_col40:timestamp,_col41:varchar(1),_col42:timestamp,_col43:varchar(1),_col44:bigint,_col45:int,_col46:varchar(45)>, reader type: struct<sub_id:bigint,free_unit_id:bigint,free_unit_owner_type:string,free_unit_type_id:bigint,free_unit_type_name:string,free_unit_type_code:string,init_balance:bigint,brandid:bigint,aniomes:string,dia:string>

Driver stacktrace:
  at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2023)
  at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1972)
  at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1971)
  at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
  at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
  at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
  at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1971)
  at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:950)
  at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:950)
  at scala.Option.foreach(Option.scala:407)
  at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:950)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2203)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2152)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2141)
  at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
  at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:752)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2093)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2114)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2133)
  at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:467)
  at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:420)
  at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:47)
  at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3625)
  at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2695)
  at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3616)
  at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
  at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
  at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
  at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3614)
  at org.apache.spark.sql.Dataset.head(Dataset.scala:2695)
  at org.apache.spark.sql.Dataset.take(Dataset.scala:2902)
  at org.apache.spark.sql.Dataset.getRows(Dataset.scala:300)
  at org.apache.spark.sql.Dataset.showString(Dataset.scala:337)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:826)
  ... 47 elided
Caused by: java.lang.ArrayIndexOutOfBoundsException: 13
  at org.apache.spark.sql.execution.datasources.orc.OrcColumnarBatchReader.initBatch(OrcColumnarBatchReader.java:183)
  at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:216)
  at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
  at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
  at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
  at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
  at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
  at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
  at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
  at org.apache.spark.scheduler.Task.run(Task.scala:127)
  at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
  at java.lang.Thread.run(Thread.java:745)

Is there any way to solve this?
Thanks in advance!
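
The exception is thrown from OrcColumnarBatchReader.initBatch, which is Spark's native vectorized ORC reader, and the warning above shows that the data files carry only positional column names (_col0, _col1, ...) because they were written by a Hive release earlier than HIVE-4243, so the reader has to reconcile schemas by position. Below is a minimal sketch of settings that steer the query away from that reader path; all three keys are standard Spark SQL configuration options, but whether any of them fixes this particular table is an assumption, not a verified result:

// Sketch only: real Spark SQL config keys, untested against this table.
// Run in spark-shell before re-issuing the query.

// Option 1: disable the vectorized native ORC reader that throws here.
spark.conf.set("spark.sql.orc.enableVectorizedReader", "false")

// Option 2: keep ORC Hive tables on the Hive SerDe read path instead of
// converting them to Spark's native ORC data source.
spark.conf.set("spark.sql.hive.convertMetastoreOrc", "false")

// Option 3: switch the native data source's ORC implementation from
// "native" to "hive".
spark.conf.set("spark.sql.orc.impl", "hive")

If none of these helps, it may also be worth checking the Spark issue tracker for ORC reader fixes in the 3.0.x maintenance releases, since the failure appears against 3.0.0 specifically.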
