From 6c8a516ffd91857bf871b47d36913cc5beac666e Mon Sep 17 00:00:00 2001
From: Aymane Chilah
Date: Fri, 22 Nov 2024 16:20:04 +0100
Subject: [PATCH] Update entities (#1612)

* update predicted entities section

* update

---------

Co-authored-by: Aymane Chilah
---
 docs/_posts/aymanechilah/2023-01-03-ner_deid_large_en_3_2.md | 2 ++
 .../2023-01-10-general_model_table_detection_v2_en_3_2.md | 2 ++
 .../aymanechilah/2023-01-10-image_text_detector_v2_en_3_2.md | 2 ++
 docs/_posts/aymanechilah/2023-01-10-text_cleaner_v1_en_3_2.md | 2 ++
 .../aymanechilah/2023-01-17-docvqa_donut_base_en_3_2.md | 2 ++
 .../aymanechilah/2023-01-17-docvqa_donut_base_opt_en_3_2.md | 2 ++
 .../2023-07-11-dit_base_finetuned_rvlcdip_en_3_2.md | 2 ++
 docs/_posts/aymanechilah/2023-07-11-tabform_v1_en_3_2.md | 2 ++
 .../aymanechilah/2023-09-14-lilt_roberta_funsd_v1_en_3_2.md | 3 +++
 .../aymanechilah/2023-09-14-lilt_rvl_cdip_296K_en_3_2.md | 3 +++
 .../aymanechilah/2023-09-27-image_text_detector_dit_en_3_2.md | 2 ++
 .../aymanechilah/2023-11-20-docvqa_pix2struct_en_3_2.md | 2 ++
 .../2023-11-20-publaynet_dit_base_mrcnn_en_3_2.md | 4 +++-
 .../2024-03-15-docvqa_pix2struct_jsl_opt_en_3_2.md | 2 ++
 .../2024-03-15-image_handwritten_detector_jsl_en_3_2.md | 2 ++
 .../aymanechilah/2024-03-15-region_cell_detection_en_3_2.md | 2 ++
 .../2024-03-15-region_cell_detection_v2_en_3_2.md | 2 ++
 .../aymanechilah/2024-03-15-table_detection_v3_en_3_2.md | 2 ++
 .../2024-04-15-chart_to_text_deplot_jsl_en_3_2.md | 1 +
 .../aymanechilah/2024-04-15-checkbox_detector_v1_en_3_2.md | 2 ++
 .../2024-04-15-info_docvqa_pix2struct_jsl_base_opt_en_3_2.md | 2 ++
 .../_posts/aymanechilah/2024-09-30-visual_re_geo_v1_en_3_2.md | 2 ++
 .../_posts/aymanechilah/2024-09-30-visual_re_geo_v2_en_3_2.md | 2 ++
 23 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/docs/_posts/aymanechilah/2023-01-03-ner_deid_large_en_3_2.md b/docs/_posts/aymanechilah/2023-01-03-ner_deid_large_en_3_2.md
index 09c7c892ff..36ca03df3b 100644
--- a/docs/_posts/aymanechilah/2023-01-03-ner_deid_large_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-01-03-ner_deid_large_en_3_2.md
@@ -25,6 +25,8 @@ It protects specific health information that could identify living or deceased i
 
 ## Predicted Entities
 
+``NAME``, ``AGE``, ``CONTACT``, ``LOCATION``, ``PROFESSION``, ``PERSON``, ``DATE``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/DEID_IMAGE/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/tutorials/Certification_Trainings/3.1.SparkOcrImageDeIdentification.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-01-10-general_model_table_detection_v2_en_3_2.md b/docs/_posts/aymanechilah/2023-01-10-general_model_table_detection_v2_en_3_2.md
index 6a104475b2..3d0fdb8081 100644
--- a/docs/_posts/aymanechilah/2023-01-10-general_model_table_detection_v2_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-01-10-general_model_table_detection_v2_en_3_2.md
@@ -24,6 +24,8 @@ Here it is used the CascadeTabNet general model for table detection inspired by
 
 ## Predicted Entities
 
+``[table]``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_TABLE_DETECTION_ONLY/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrImageTableDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-01-10-image_text_detector_v2_en_3_2.md b/docs/_posts/aymanechilah/2023-01-10-image_text_detector_v2_en_3_2.md
index fe59d1fb95..a3cbab26d9 100644
--- a/docs/_posts/aymanechilah/2023-01-10-image_text_detector_v2_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-01-10-image_text_detector_v2_en_3_2.md
@@ -22,6 +22,8 @@ CRAFT: Character-Region Awareness For Text detection, is designed with a convolu
 
 ## Predicted Entities
 
+``text``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/TEXT_DETECTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrImageTextDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-01-10-text_cleaner_v1_en_3_2.md b/docs/_posts/aymanechilah/2023-01-10-text_cleaner_v1_en_3_2.md
index 0359d4792a..de7532f99d 100644
--- a/docs/_posts/aymanechilah/2023-01-10-text_cleaner_v1_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-01-10-text_cleaner_v1_en_3_2.md
@@ -22,6 +22,8 @@ Model for cleaning image with text. It is based on text detection model with ext
 
 ## Predicted Entities
 
+``corrected_image``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_CLEANER/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrImageCleaner.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_en_3_2.md b/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_en_3_2.md
index 0bf63a3303..dde2720be2 100644
--- a/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_en_3_2.md
@@ -25,6 +25,8 @@ DocVQA seeks to inspire a “purpose-driven” point of view in Document Analysi
 
 ## Predicted Entities
 
+``answers``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/VISUAL_QUESTION_ANSWERING/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrVisualQuestionAnswering.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_opt_en_3_2.md b/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_opt_en_3_2.md
index 5f11887203..3099881c69 100644
--- a/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_opt_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-01-17-docvqa_donut_base_opt_en_3_2.md
@@ -24,6 +24,8 @@ DocVQA seeks to inspire a “purpose-driven” point of view in Document Analysi
 
 ## Predicted Entities
 
+``answers``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/VISUAL_QUESTION_ANSWERING/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrVisualQuestionAnswering_opt.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-07-11-dit_base_finetuned_rvlcdip_en_3_2.md b/docs/_posts/aymanechilah/2023-07-11-dit_base_finetuned_rvlcdip_en_3_2.md
index 028d755a69..5bef4d25a6 100644
--- a/docs/_posts/aymanechilah/2023-07-11-dit_base_finetuned_rvlcdip_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-07-11-dit_base_finetuned_rvlcdip_en_3_2.md
@@ -26,6 +26,8 @@ The abstract from the paper is the following: Image Transformer has recently ach
 
 ## Predicted Entities
 
+``label``.
+
 {:.btn-box}
diff --git a/docs/_posts/aymanechilah/2023-07-11-tabform_v1_en_3_2.md b/docs/_posts/aymanechilah/2023-07-11-tabform_v1_en_3_2.md
index 5c6231a02c..ed7512c7a3 100644
--- a/docs/_posts/aymanechilah/2023-07-11-tabform_v1_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-07-11-tabform_v1_en_3_2.md
@@ -22,6 +22,8 @@ Model for table and form detection in documents. It is based on text detection m
 
 ## Predicted Entities
 
+``table``, ``form``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_TABLE_FORM_DETECTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrImageTableAndFormDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-09-14-lilt_roberta_funsd_v1_en_3_2.md b/docs/_posts/aymanechilah/2023-09-14-lilt_roberta_funsd_v1_en_3_2.md
index 158eec75ff..37b0ff9281 100644
--- a/docs/_posts/aymanechilah/2023-09-14-lilt_roberta_funsd_v1_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-09-14-lilt_roberta_funsd_v1_en_3_2.md
@@ -26,6 +26,9 @@ In the abstract of the LiLT paper, the authors emphasize the growing importance
 
 ## Predicted Entities
 
+``other``, ``b-header``, ``i-header``, ``b-question``, ``i-question``, ``b-answer``, ``i-answer``.
+
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/VISUAL_DOCUMENT_KEYVALUES_NER_LILT/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOCRVisualDocumentNer-FormParsing.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-09-14-lilt_rvl_cdip_296K_en_3_2.md b/docs/_posts/aymanechilah/2023-09-14-lilt_rvl_cdip_296K_en_3_2.md
index 72f442a920..6414e415f9 100644
--- a/docs/_posts/aymanechilah/2023-09-14-lilt_rvl_cdip_296K_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-09-14-lilt_rvl_cdip_296K_en_3_2.md
@@ -26,6 +26,9 @@ In the LiLT paper's abstract, the authors emphasize the importance of structured
 
 ## Predicted Entities
 
+``label``.
+
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_CLASSIFIER/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOCRVisualDocumentClassifierLiLT.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-09-27-image_text_detector_dit_en_3_2.md b/docs/_posts/aymanechilah/2023-09-27-image_text_detector_dit_en_3_2.md
index bc641ce2b9..38960dc902 100644
--- a/docs/_posts/aymanechilah/2023-09-27-image_text_detector_dit_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-09-27-image_text_detector_dit_en_3_2.md
@@ -25,6 +25,8 @@ The abstract from the paper is the following: Image Transformer has recently ach
 
 ## Predicted Entities
 
+``text_regions``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/TEXT_DETECTION_DIT/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrImageTextDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-11-20-docvqa_pix2struct_en_3_2.md b/docs/_posts/aymanechilah/2023-11-20-docvqa_pix2struct_en_3_2.md
index 1c8546a396..2f79d9f047 100644
--- a/docs/_posts/aymanechilah/2023-11-20-docvqa_pix2struct_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-11-20-docvqa_pix2struct_en_3_2.md
@@ -25,6 +25,8 @@ In this context, the Pix2Struct model, originally conceived as an image-to-text
 
 ## Predicted Entities
 
+``answers``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/VISUAL_QUESTION_ANSWERING/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOcrVisualQuestionAnsweringPix2Struct.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2023-11-20-publaynet_dit_base_mrcnn_en_3_2.md b/docs/_posts/aymanechilah/2023-11-20-publaynet_dit_base_mrcnn_en_3_2.md
index 8399983363..7b13d40243 100644
--- a/docs/_posts/aymanechilah/2023-11-20-publaynet_dit_base_mrcnn_en_3_2.md
+++ b/docs/_posts/aymanechilah/2023-11-20-publaynet_dit_base_mrcnn_en_3_2.md
@@ -23,9 +23,11 @@ DiT, as proposed in the paper "DiT: Self-supervised Pre-training for Document Im
 
 ## Predicted Entities
 
+``text``, ``title``, ``list``, ``table``, ``figure``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/LAYOUT_ANALYSIS/){:.button.button-orange.button-orange-trans.co.button-icon}
-[Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOCRDitLayoutAnalyze.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
+[Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOCRDocumentLayoutAnalyzer.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
 [Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/clinical/ocr/publaynet_dit_base_mrcnn_jsl_en_5.0.0_3.0_1698062080337.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
 
 ## How to use
diff --git a/docs/_posts/aymanechilah/2024-03-15-docvqa_pix2struct_jsl_opt_en_3_2.md b/docs/_posts/aymanechilah/2024-03-15-docvqa_pix2struct_jsl_opt_en_3_2.md
index d8e9306829..616b10bafc 100644
--- a/docs/_posts/aymanechilah/2024-03-15-docvqa_pix2struct_jsl_opt_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-03-15-docvqa_pix2struct_jsl_opt_en_3_2.md
@@ -25,6 +25,8 @@ In this context, the Pix2Struct model, originally conceived as an image-to-text
 
 ## Predicted Entities
 
+``answers``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/VISUAL_QUESTION_ANSWERING/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOcrVisualQuestionAnsweringJsl.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-03-15-image_handwritten_detector_jsl_en_3_2.md b/docs/_posts/aymanechilah/2024-03-15-image_handwritten_detector_jsl_en_3_2.md
index 1ca93dfdae..05b122d7e6 100644
--- a/docs/_posts/aymanechilah/2024-03-15-image_handwritten_detector_jsl_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-03-15-image_handwritten_detector_jsl_en_3_2.md
@@ -22,6 +22,8 @@ Object detection model trained to detect handwritten text one of the foremost ar
 
 ## Predicted Entities
 
+``hw``, ``signature``.
+
 [Live Demo](https://demo.johnsnowlabs.com/ocr/DETECT_HANDWRITTEN/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOCRHandwrittenAndSignatureDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
 [Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/clinical/ocr/image_handwritten_detector_jsl_en_5.1.2_3.0_1703781670000.zip){:.button.button-orange.button-orange-trans.arr.button-icon.hidden}
diff --git a/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_en_3_2.md b/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_en_3_2.md
index 5ce78ccf87..59c34ec121 100644
--- a/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_en_3_2.md
@@ -22,6 +22,8 @@ Object detection model trained to detect table cells one of the foremost archite
 
 ## Predicted Entities
 
+``cells``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_REGION_CELL_DETECTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOcrImageTableRecognitionWHOCR.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_v2_en_3_2.md b/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_v2_en_3_2.md
index 6327e54f36..942f818df6 100644
--- a/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_v2_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-03-15-region_cell_detection_v2_en_3_2.md
@@ -23,6 +23,8 @@ Object detection model trained to detect table cells one of the foremost archite
 
 ## Predicted Entities
 
+``cells``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_REGION_CELL_DETECTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOcrImageTableRecognitionWHOCR.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-03-15-table_detection_v3_en_3_2.md b/docs/_posts/aymanechilah/2024-03-15-table_detection_v3_en_3_2.md
index fadf31dc4c..1a82d7f4f0 100644
--- a/docs/_posts/aymanechilah/2024-03-15-table_detection_v3_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-03-15-table_detection_v3_en_3_2.md
@@ -22,6 +22,8 @@ Object detection model trained to detect tables leverages one of the foremost ar
 
 ## Predicted Entities
 
+``table``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/IMAGE_TABLE_DETECTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOcrImageTableDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-04-15-chart_to_text_deplot_jsl_en_3_2.md b/docs/_posts/aymanechilah/2024-04-15-chart_to_text_deplot_jsl_en_3_2.md
index 2e3e8e0dfc..805d5b3198 100644
--- a/docs/_posts/aymanechilah/2024-04-15-chart_to_text_deplot_jsl_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-04-15-chart_to_text_deplot_jsl_en_3_2.md
@@ -24,6 +24,7 @@ DePlot, as outlined in the paper "DePlot: One-shot visual language reasoning by
 
 ## Predicted Entities
 
+``answers``.
 
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/PDF_CHART_TO_TEXT/){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-04-15-checkbox_detector_v1_en_3_2.md b/docs/_posts/aymanechilah/2024-04-15-checkbox_detector_v1_en_3_2.md
index a59747f740..585118f93e 100644
--- a/docs/_posts/aymanechilah/2024-04-15-checkbox_detector_v1_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-04-15-checkbox_detector_v1_en_3_2.md
@@ -22,6 +22,8 @@ Object detection model trained to detect document checkboxes one of the foremost
 
 ## Predicted Entities
 
+``Unchecked``, ``Checked``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/CHECKBOX_DETECTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/SparkOcrCheckBoxDetection.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-04-15-info_docvqa_pix2struct_jsl_base_opt_en_3_2.md b/docs/_posts/aymanechilah/2024-04-15-info_docvqa_pix2struct_jsl_base_opt_en_3_2.md
index 546e5f1ac1..7d3e58d56b 100644
--- a/docs/_posts/aymanechilah/2024-04-15-info_docvqa_pix2struct_jsl_base_opt_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-04-15-info_docvqa_pix2struct_jsl_base_opt_en_3_2.md
@@ -25,6 +25,8 @@ In this context, the Pix2Struct model, originally conceived as an image-to-text
 
 ## Predicted Entities
 
+``answers``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/INFOGRAPHIC_VISUAL_QUESTION_ANSWERING/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/spark-ocr-workshop/blob/master/jupyter/Cards/SparkOCRInfographicsVisualQuestionAnswering.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v1_en_3_2.md b/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v1_en_3_2.md
index 3fb3669b95..74c75c33b0 100644
--- a/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v1_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v1_en_3_2.md
@@ -25,6 +25,8 @@ GeoLayoutLM is designed as a multi-modal framework that handles tasks like Seman
 
 ## Predicted Entities
 
+``other``, ``b-header``, ``i-header``, ``b-question``, ``i-question``, ``b-answer``, ``i-answer``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/RELATION_EXTRACTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/visual-nlp-workshop/blob/master/jupyter/FormRecognition/FormRecognitionGeo.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}
diff --git a/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v2_en_3_2.md b/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v2_en_3_2.md
index a739a85500..d3ae92b6a2 100644
--- a/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v2_en_3_2.md
+++ b/docs/_posts/aymanechilah/2024-09-30-visual_re_geo_v2_en_3_2.md
@@ -25,6 +25,8 @@ GeoLayoutLM is designed as a multi-modal framework that handles tasks like Seman
 
 ## Predicted Entities
 
+``other``, ``b-header``, ``i-header``, ``b-question``, ``i-question``, ``b-answer``, ``i-answer``.
+
 {:.btn-box}
 [Live Demo](https://demo.johnsnowlabs.com/ocr/RELATION_EXTRACTION/){:.button.button-orange.button-orange-trans.co.button-icon}
 [Open in Colab](https://github.com/JohnSnowLabs/visual-nlp-workshop/blob/master/jupyter/FormRecognition/FormRecognitionGeo.ipynb){:.button.button-orange.button-orange-trans.co.button-icon}