I have created a simple Java 8 application that extracts text from PDFs and then identifies people's names in that text. It can be used as part of a larger data processing pipeline or HDF flow by calling it via REST or the command line, or by converting it into a NiFi processor. The Maven pom.xml below lists the project's dependencies.
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.dataflowdeveloper</groupId>
  <artifactId>categorizer</artifactId>
  <packaging>jar</packaging>
  <version>1.0</version>
  <name>categorizer</name>
  <url>http://maven.apache.org</url>
  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-simple</artifactId>
      <version>1.7.7</version>
    </dependency>
    <dependency>
      <groupId>org.apache.opennlp</groupId>
      <artifactId>opennlp-tools</artifactId>
      <version>1.7.0</version>
    </dependency>
    <dependency>
      <groupId>com.google.code.gson</groupId>
      <artifactId>gson</artifactId>
      <version>2.8.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.tika/tika-core -->
    <dependency>
      <groupId>org.apache.tika</groupId>
      <artifactId>tika-core</artifactId>
      <version>1.14</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.tika/tika-parsers -->
    <dependency>
      <groupId>org.apache.tika</groupId>
      <artifactId>tika-parsers</artifactId>
      <version>1.14</version>
    </dependency>
    <dependency>
      <groupId>org.apache.tika</groupId>
      <artifactId>tika-langdetect</artifactId>
      <version>1.14</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-core</artifactId>
      <version>3.5.0</version>
    </dependency>
    <dependency>
      <groupId>commons-io</groupId>
      <artifactId>commons-io</artifactId>
      <version>2.5</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.7.3</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.7.3</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.7.3</version>
    </dependency>
  </dependencies>
</project>
Java Application
// App.java
package com.dataflowdeveloper;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.pdf.PDFParser;
import org.apache.tika.sax.BodyContentHandler;
import org.xml.sax.SAXException;

public class App {

    public static void main(String[] args) {
        BodyContentHandler handler = new BodyContentHandler();
        Metadata metadata = new Metadata();

        // Open the source PDF from the local file system
        FileInputStream inputstream = null;
        try {
            inputstream = new FileInputStream(
                    new File(System.getProperty("user.dir") + "/testdocs/opennlp.pdf"));
        } catch (FileNotFoundException e1) {
            e1.printStackTrace();
        }

        // Parse the document using Tika's PDF parser
        ParseContext pcontext = new ParseContext();
        PDFParser pdfparser = new PDFParser();
        try {
            pdfparser.parse(inputstream, handler, metadata, pcontext);
        } catch (SAXException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } catch (TikaException e) {
            e.printStackTrace();
        }

        // Run the extracted text through OpenNLP to find people's names
        NameFinder nameFinder = new NameFinder();
        System.out.println(nameFinder.getPeople(handler.toString()));
    }
}

// NameFinder.java
package com.dataflowdeveloper;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import com.google.gson.Gson;

import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.TokenNameFinderModel;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.InvalidFormatException;
import opennlp.tools.util.Span;

/**
 * Finds people's names in a block of text using the pre-trained
 * OpenNLP English tokenizer and person-name models, returning JSON.
 */
public class NameFinder {

    private static final String CURRENT_DIR = System.getProperty("user.dir");
    private static final String CURRENT_FILE = CURRENT_DIR + "/input/en-ner-person.bin";
    private static final String CURRENT_TOKEN_FILE = CURRENT_DIR + "/input/en-token.bin";

    /**
     * Extracts people's names from a sentence.
     *
     * @param sentence text to scan
     * @return JSON of the form {"names":[...]}
     */
    public String getPeople(String sentence) {
        String outputJSON = "";
        TokenNameFinderModel model = null;
        InputStream tokenStream = null;
        Tokenizer tokenizer = null;
        try {
            // Load the tokenizer and person-name models from the input directory
            tokenStream = new FileInputStream(new File(CURRENT_TOKEN_FILE));
            model = new TokenNameFinderModel(new File(CURRENT_FILE));
            TokenizerModel tokenModel = new TokenizerModel(tokenStream);
            tokenizer = new TokenizerME(tokenModel);
        } catch (InvalidFormatException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Create a NameFinder using the model
        NameFinderME finder = new NameFinderME(model);

        // Split the sentence into tokens
        String[] tokens = tokenizer.tokenize(sentence);

        // Find the names in the tokens and return Span objects
        Span[] nameSpans = finder.find(tokens);

        List<PersonName> people = new ArrayList<PersonName>();
        String[] spanns = Span.spansToStrings(nameSpans, tokens);
        for (int i = 0; i < spanns.length; i++) {
            people.add(new PersonName(spanns[i]));
        }

        outputJSON = new Gson().toJson(people);
        finder.clearAdaptiveData();
        return "{\"names\":" + outputJSON + "}";
    }
}
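NameFinder references a PersonName class that is not included in the listing above; it is only described as a small holder class in the same package. A minimal sketch, assuming the field is simply called name, might look like this:

package com.dataflowdeveloper;

/**
 * Minimal sketch of the PersonName holder class referenced above.
 * The original class is described only as a simple holder for the
 * extracted name string; the field name "name" is an assumption.
 */
public class PersonName {

    private String name;

    public PersonName(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }
}

With a field called name, the Gson serialization in getPeople() would produce output shaped like {"names":[{"name":"John Smith"}]}.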
Process
1. We open the file stream for reading (this can come from HDFS, S3, or a regular file system; a hedged HDFS example is sketched after this list).
2. Then we use Apache Tika's PDF parser to extract the text. We also capture the document metadata for other processing (see the metadata sketch after this list).
3. Using OpenNLP, we extract all of the people's names from that text.
4. Using Google Gson, we then turn the names into JSON for easy use downstream.
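Step 1 mentions that the input can come from HDFS rather than the local file system. Since the pom already pulls in hadoop-client, a sketch of opening the PDF from HDFS could look like the following; the class name, NameNode URI, and path are placeholders rather than part of the original application.

package com.dataflowdeveloper;

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch: open the source PDF from HDFS instead of the local file system.
 * Assumes core-site.xml/hdfs-site.xml are on the classpath, or that
 * fs.defaultFS is set explicitly (e.g. "hdfs://namenode:8020").
 */
public class HdfsPdfSource {

    public static InputStream open(String pathInHdfs) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Returns an FSDataInputStream, which Tika's parser accepts as a plain InputStream
        return fs.open(new Path(pathInHdfs));
    }
}

The stream returned here can be passed to pdfparser.parse(...) exactly as the FileInputStream is in App.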
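Step 2 notes that the Tika Metadata object is captured for other processing, although App itself never reads it. A small sketch of dumping whatever metadata Tika extracted (the available keys depend entirely on the source PDF) could be:

package com.dataflowdeveloper;

import org.apache.tika.metadata.Metadata;

/**
 * Sketch: print every metadata field Tika extracted from the PDF
 * (author, creation date, content type, and so on, when present).
 */
public class MetadataPrinter {

    public static void print(Metadata metadata) {
        for (String name : metadata.names()) {
            System.out.println(name + " = " + metadata.get(name));
        }
    }
}

In App this would be called right after pdfparser.parse(...), for example MetadataPrinter.print(metadata);.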
The PersonName class used in NameFinder is in com.dataflowdeveloper; it is a small one-method class I wrote to hold the extracted name string (a minimal sketch appears after the code listing above).