
ai.djl.engine.EngineException: Failed to load PyTorch native library

shutter-cp opened this issue on Feb 01 '23 · 5 comments

23/02/01 16:09:44 ERROR Executor: Exception in task 0.0 in stage 3.0 (TID 2)
ai.djl.engine.EngineException: Failed to load PyTorch native library
	at ai.djl.pytorch.engine.PtEngine.newInstance(PtEngine.java:85)
	at ai.djl.pytorch.engine.PtEngineProvider.getEngine(PtEngineProvider.java:40)
	at ai.djl.engine.Engine.getEngine(Engine.java:186)
	at ai.djl.Model.newInstance(Model.java:99)
	at ai.djl.repository.zoo.BaseModelLoader.createModel(BaseModelLoader.java:189)
	at ai.djl.repository.zoo.BaseModelLoader.loadModel(BaseModelLoader.java:152)
	at ai.djl.repository.zoo.Criteria.loadModel(Criteria.java:168)
	at net.xxx.ai.recommender.core.NERModel$.loadModel(NERModel.scala:39)
	at net.xxx.ai.recommender.core.NERModel$.model$lzycompute$1(NERModel.scala:64)
	at net.xxx.ai.recommender.core.NERModel$.net$qihoo$ai$recommender$core$NERModel$$model$1(NERModel.scala:64)
	at net.xxx.ai.recommender.core.NERModel$$anonfun$3.apply(NERModel.scala:68)
	at net.xxx.ai.recommender.core.NERModel$$anonfun$3.apply(NERModel.scala:66)
	at org.apache.spark.sql.execution.MapPartitionsExec$$anonfun$5.apply(objects.scala:188)
	at org.apache.spark.sql.execution.MapPartitionsExec$$anonfun$5.apply(objects.scala:185)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:868)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:868)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:123)
	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:415)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:421)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.UnsatisfiedLinkError: /data01/home/yarn/.djl.ai/pytorch/1.9.1-cpu-linux-x86_64/libc10.so: /usr/lib64/libstdc++.so.6: version `GLIBCXX_3.4.21' not found (required by /data01/home/yarn/.djl.ai/pytorch/1.9.1-cpu-linux-x86_64/libc10.so)
	at java.lang.ClassLoader$NativeLibrary.load(Native Method)
	at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1938)
	at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1821)
	at java.lang.Runtime.load0(Runtime.java:809)
	at java.lang.System.load(System.java:1086)
	at ai.djl.pytorch.jni.LibUtils.loadNativeLibrary(LibUtils.java:368)
	at java.util.stream.ForEachOps$ForEachOp$OfRef.accept(ForEachOps.java:184)
	at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:193)
	at java.util.stream.ReferencePipeline$2$1.accept(ReferencePipeline.java:175)
	at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:193)
	at java.util.Iterator.forEachRemaining(Iterator.java:116)
	at java.util.Spliterators$IteratorSpliterator.forEachRemaining(Spliterators.java:1801)
	at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:481)
	at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:471)
	at java.util.stream.ForEachOps$ForEachOp.evaluateSequential(ForEachOps.java:151)
	at java.util.stream.ForEachOps$ForEachOp$OfRef.evaluateSequential(ForEachOps.java:174)
	at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
	at java.util.stream.ReferencePipeline.forEach(ReferencePipeline.java:418)
	at ai.djl.pytorch.jni.LibUtils.loadLibTorch(LibUtils.java:146)
	at ai.djl.pytorch.jni.LibUtils.loadLibrary(LibUtils.java:78)
	at ai.djl.pytorch.engine.PtEngine.newInstance(PtEngine.java:54)
	... 29 more
23/02/01 16:09:44 ERROR Executor: Exception in task 1.0 in stage 3.0 (TID 3)
ai.djl.engine.EngineException: Failed to load PyTorch native library
	[identical stack trace to task 0.0 above, same GLIBCXX_3.4.21 cause]
23/02/01 16:10:16 ERROR CoarseGrainedExecutorBackend: RECEIVED SIGNAL TERM

shutter-cp — Feb 01 '23 08:02

@shutter-cp Which version of Linux are you using? It looks like your libstdc++.so.6 is too old; can you upgrade to the latest version?

You can also try our precxx11 build of PyTorch; see: https://docs.djl.ai/master/engines/pytorch/pytorch-engine/index.html#for-pre-cxx11-build
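
To confirm which native flavor DJL actually resolves, you can also ask it to dump its environment before any Spark work runs. A minimal sketch, assuming a recent DJL release where Engine.debugEnvironment() is available:

import ai.djl.engine.Engine

object EngineDebug {
  def main(args: Array[String]): Unit = {
    Engine.debugEnvironment()                // prints OS, arch, engines, and native cache info
    val engine = Engine.getEngine("PyTorch") // forces the PyTorch native library to load
    println(s"Loaded ${engine.getEngineName} ${engine.getVersion}")
  }
}

On YARN this only reflects the node it runs on, so it is worth running it both on the driver and inside a task.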

frankfliu — Feb 01 '23 15:02

> @shutter-cp Which version of Linux are you using? It looks like your libstdc++.so.6 is too old; can you upgrade to the latest version?
>
> You can also try our precxx11 build of PyTorch; see: https://docs.djl.ai/master/engines/pytorch/pytorch-engine/index.html#for-pre-cxx11-build

Switched to the precxx11 build (1.13.0), but it still fails to load:

ai.djl.engine.EngineException: Failed to load PyTorch native library
	at ai.djl.pytorch.engine.PtEngine.newInstance(PtEngine.java:85)
	at ai.djl.pytorch.engine.PtEngineProvider.getEngine(PtEngineProvider.java:40)
	at ai.djl.engine.Engine.getEngine(Engine.java:186)
	at ai.djl.Model.newInstance(Model.java:99)
	at ai.djl.repository.zoo.BaseModelLoader.createModel(BaseModelLoader.java:189)
	at ai.djl.repository.zoo.BaseModelLoader.loadModel(BaseModelLoader.java:152)
	at ai.djl.repository.zoo.Criteria.loadModel(Criteria.java:168)
	at net.xxx.ai.recommender.core.NERModel$.loadModel(NERModel.scala:39)
	at net.xxx.ai.recommender.core.NERModel$.model$lzycompute$1(NERModel.scala:65)
	at net.xxx.ai.recommender.core.NERModel$.net$xxx$ai$recommender$core$NERModel$$model$1(NERModel.scala:65)
	at net.xxx.ai.recommender.core.NERModel$$anonfun$3.apply(NERModel.scala:69)
	at net.xxx.ai.recommender.core.NERModel$$anonfun$3.apply(NERModel.scala:67)
	at org.apache.spark.sql.execution.MapPartitionsExec$$anonfun$5.apply(objects.scala:188)
	at org.apache.spark.sql.execution.MapPartitionsExec$$anonfun$5.apply(objects.scala:185)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:868)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:868)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:123)
	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:415)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:421)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.UnsatisfiedLinkError: /data01/home/yarn/.djl.ai/pytorch/1.13.0-20221116-cpu-precxx11-linux-x86_64/libstdc++.so.6: /lib64/libc.so.6: version `GLIBC_2.14' not found (required by /data01/home/yarn/.djl.ai/pytorch/1.13.0-20221116-cpu-precxx11-linux-x86_64/libstdc++.so.6)
	at java.lang.ClassLoader$NativeLibrary.load(Native Method)
	at java.lang.ClassLoader.loadLibrary0(ClassLoader.java:1938)
	at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1821)
	at java.lang.Runtime.load0(Runtime.java:809)
	at java.lang.System.load(System.java:1086)
	at ai.djl.pytorch.jni.LibUtils.loadNativeLibrary(LibUtils.java:368)
	at java.util.stream.ForEachOps$ForEachOp$OfRef.accept(ForEachOps.java:184)
	at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:193)
	at java.util.stream.ReferencePipeline$2$1.accept(ReferencePipeline.java:175)
	at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:193)
	at java.util.Iterator.forEachRemaining(Iterator.java:116)
	at java.util.Spliterators$IteratorSpliterator.forEachRemaining(Spliterators.java:1801)
	at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:481)
	at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:471)
	at java.util.stream.ForEachOps$ForEachOp.evaluateSequential(ForEachOps.java:151)
	at java.util.stream.ForEachOps$ForEachOp$OfRef.evaluateSequential(ForEachOps.java:174)
	at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
	at java.util.stream.ReferencePipeline.forEach(ReferencePipeline.java:418)
	at ai.djl.pytorch.jni.LibUtils.loadLibTorch(LibUtils.java:146)
	at ai.djl.pytorch.jni.LibUtils.loadLibrary(LibUtils.java:78)
	at ai.djl.pytorch.engine.PtEngine.newInstance(PtEngine.java:54)
	... 29 more
23/02/01 19:25:27 ERROR CoarseGrainedExecutorBackend: RECEIVED SIGNAL TERM

pom.xml:

<dependency>
    <groupId>ai.djl</groupId>
    <artifactId>api</artifactId>
</dependency>
<!-- PyTorch -->
<dependency>
    <groupId>ai.djl.pytorch</groupId>
    <artifactId>pytorch-model-zoo</artifactId>
</dependency>
<dependency>
    <groupId>ai.djl.pytorch</groupId>
    <artifactId>pytorch-engine</artifactId>
</dependency>
<dependency>
    <groupId>ai.djl.hadoop</groupId>
    <artifactId>hadoop</artifactId>
</dependency>
<dependency>
    <groupId>ai.djl.spark</groupId>
    <artifactId>spark</artifactId>
</dependency>
<dependency>
    <groupId>ai.djl.pytorch</groupId>
    <artifactId>pytorch-native-cpu-precxx11</artifactId>
    <classifier>linux-x86_64</classifier>
    <version>1.13.0</version>
</dependency>
<dependency>
    <groupId>ai.djl.pytorch</groupId>
    <artifactId>pytorch-jni</artifactId>
    <version>1.13.0-0.20.0</version>
</dependency>

shutter-cp — Feb 02 '23 02:02

Are you running CentOS 6? Your OS might be too old: the missing `GLIBC_2.14' symbol means the node's glibc predates 2.14, and CentOS 6 ships glibc 2.12.

The minimum Linux versions PyTorch supports are CentOS 7, Ubuntu 18.04, and Amazon Linux 2.
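
Since the UnsatisfiedLinkError is raised inside the Spark executors, it is worth confirming the OS on the YARN worker nodes themselves, not just on the driver. A minimal sketch, assuming an active SparkSession named spark (CentOS 6 nodes have no /etc/os-release, so /etc/redhat-release is read instead):

import scala.io.Source
import scala.util.Try

val nodes = spark.sparkContext
  .parallelize(1 to 1000, 20)          // dummy tasks spread across the executors
  .mapPartitions { _ =>
    // read the release string on whatever node this partition landed on
    val os = Try {
      val src = Source.fromFile("/etc/redhat-release")
      try src.getLines().next() finally src.close()
    }.getOrElse("unknown")
    Iterator((java.net.InetAddress.getLocalHost.getHostName, os))
  }
  .distinct()
  .collect()

nodes.foreach { case (host, os) => println(s"$host -> $os") }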

frankfliu — Feb 02 '23 02:02

> Are you running CentOS 6? Your OS might be too old.
>
> The minimum Linux versions PyTorch supports are CentOS 7, Ubuntu 18.04, and Amazon Linux 2.

We moved to CentOS 7 and the native library now loads, but model loading fails with "No matching model with specified Input/Output type found." The input and output types are both String:

Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 4 times, most recent failure: Lost task 0.3 in stage 3.0 (TID 6) (10.22.0.38 executor 1): ai.djl.repository.zoo.ModelNotFoundException: No matching model with specified Input/Output type found.
        at ai.djl.repository.zoo.Criteria.loadModel(Criteria.java:180)
        at ai.djl.repository.zoo.ModelZoo.loadModel(ModelZoo.java:141)
        at net.qihoo.ai.recommender.core.NERModel$.loadModel(NERModel.scala:34)
        at net.qihoo.ai.recommender.core.NERModel$.model$lzycompute$1(NERModel.scala:62)
        at net.qihoo.ai.recommender.core.NERModel$.model$1(NERModel.scala:62)
        at net.qihoo.ai.recommender.core.NERModel$.$anonfun$doNer$3(NERModel.scala:65)
        at org.apache.spark.sql.execution.MapPartitionsExec.$anonfun$doExecute$3(objects.scala:195)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
        at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
        at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
        at java.base/java.lang.Thread.run(Unknown Source)

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2303)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2252)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2251)
        at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
        at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2251)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1124)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1124)
        at scala.Option.foreach(Option.scala:407)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1124)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2490)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2432)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2421)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:902)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2196)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2217)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2236)
        at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:472)
        at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:425)
        at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:47)
        at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3709)
        at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2735)
        at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3700)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3698)
        at org.apache.spark.sql.Dataset.head(Dataset.scala:2735)
        at org.apache.spark.sql.Dataset.take(Dataset.scala:2942)
        at org.apache.spark.sql.Dataset.getRows(Dataset.scala:302)
        at org.apache.spark.sql.Dataset.showString(Dataset.scala:339)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:828)
        at net.qihoo.ai.recommender.core.NERModel$.doNer(NERModel.scala:99)
        at net.qihoo.ai.recommender.RecommenderApp$.recommender(RecommenderApp.scala:233)
        at net.qihoo.ai.recommender.RecommenderApp$.main(RecommenderApp.scala:14)
        at net.qihoo.ai.recommender.RecommenderApp.main(RecommenderApp.scala)
        at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
        at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
        at java.base/java.lang.reflect.Method.invoke(Unknown Source)
        at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
        at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:951)
        at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)
        at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)
        at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)
        at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1039)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1048)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: ai.djl.repository.zoo.ModelNotFoundException: No matching model with specified Input/Output type found.
        at ai.djl.repository.zoo.Criteria.loadModel(Criteria.java:180)
        at ai.djl.repository.zoo.ModelZoo.loadModel(ModelZoo.java:141)
        at net.qihoo.ai.recommender.core.NERModel$.loadModel(NERModel.scala:34)
        at net.qihoo.ai.recommender.core.NERModel$.model$lzycompute$1(NERModel.scala:62)
        at net.qihoo.ai.recommender.core.NERModel$.model$1(NERModel.scala:62)
        at net.qihoo.ai.recommender.core.NERModel$.$anonfun$doNer$3(NERModel.scala:65)
        at org.apache.spark.sql.execution.MapPartitionsExec.$anonfun$doExecute$3(objects.scala:195)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
        at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
        at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
        at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
        at java.base/java.lang.Thread.run(Unknown Source)
The predictor is declared with String for both the input and the output type:

val predictor: Predictor[java.lang.String, java.lang.String] = model.newPredictor

shutter-cp — Feb 02 '23 10:02

@shutter-cp

  1. It looks like you didn't specify a translator; see the example here: https://github.com/deepjavalibrary/djl/blob/master/extensions/tokenizers/src/test/java/ai/djl/huggingface/tokenizers/TokenClassificationTranslatorTest.java#L89-L110

  2. You can use our built-in model and replace the model name with your own (see the sketch below); example: https://github.com/deepjavalibrary/djl/blob/master/extensions/tokenizers/src/test/java/ai/djl/huggingface/zoo/ModelZooTest.java#L45-L60

  3. It would help if you could share your code with us for further debugging.
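
For illustration, a minimal Scala sketch combining both suggestions. It assumes the ai.djl.huggingface tokenizers extension is on the classpath, and the model id dslim/bert-base-NER is only an example from the built-in zoo, so replace it with your own. The key point is that a token-classification model produces Array[NamedEntity], not String, so the Criteria types must match:

import ai.djl.modality.nlp.translator.NamedEntity
import ai.djl.repository.zoo.Criteria

val criteria = Criteria.builder()
  .setTypes(classOf[String], classOf[Array[NamedEntity]]) // not (String, String)
  .optModelUrls("djl://ai.djl.huggingface.pytorch/dslim/bert-base-NER")
  .optEngine("PyTorch")
  .build()

val model = criteria.loadModel()
val predictor = model.newPredictor()
val entities = predictor.predict("My name is Wolfgang and I live in Berlin")
entities.foreach(e => println(s"${e.getEntity}: ${e.getWord}"))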

xyang16 — Feb 02 '23 23:02