R: tidytext RPJP BAPPENAS

library(tidyverse)
library(tidytext)
library(tm)
directory <- "data-pdf"

# Read the PDFs into a corpus and convert it straight to a document-term matrix
converted <- VCorpus(DirSource(directory), readerControl = list(reader = readPDF)) %>%
  DocumentTermMatrix()
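
readPDF() hands the actual text extraction to an external engine such as pdftools. As a sketch (not part of the original page), the same call can name the engine explicitly, assuming the pdftools package is installed:

# Equivalent to the call above, with the PDF engine spelled out (assumes pdftools is installed)
converted <- VCorpus(DirSource(directory),
                     readerControl = list(reader = readPDF(engine = "pdftools"))) %>%
  DocumentTermMatrix()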

# converted is a DocumentTermMatrix; tidy() turns it into a one-term-per-row
# tibble, and the filter drops every term that contains a digit
converted %>%
  tidy() %>%
  filter(!grepl("[0-9]+", term))
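
A possible next step, as a sketch rather than part of the original page: the tidied matrix has document, term, and count columns, so tidytext's bind_tf_idf() can rank the terms that best characterize each PDF.

# Sketch: rank terms by tf-idf to see what distinguishes each document
converted %>%
  tidy() %>%
  filter(!grepl("[0-9]+", term)) %>%
  bind_tf_idf(term, document, count) %>%
  arrange(desc(tf_idf))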


library(RWeka)   # provides NGramTokenizer() and Weka_control() used below

docs <- VCorpus(DirSource("data", recursive = TRUE))
# Build the document-term matrices (unigrams and bigrams)
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm_unigram <- DocumentTermMatrix(docs, control = list(tokenize="words", 
    removePunctuation = TRUE, 
    stopwords = stopwords("english"), 
    stemming = TRUE))
dtm_bigram <- DocumentTermMatrix(docs, control = list(tokenize = BigramTokenizer,
    removePunctuation = TRUE,
    stopwords = stopwords("english"),
    stemming = TRUE))
inspect(dtm_unigram)
inspect(dtm_bigram)
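
The same tidytext route works for these matrices as well. A short sketch (assuming the dtm_bigram object built above) that lists the most frequent bigrams across the whole corpus:

# Sketch: collapse the bigram DTM to corpus-wide bigram frequencies
tidy(dtm_bigram) %>%
  group_by(term) %>%
  summarise(count = sum(count)) %>%
  arrange(desc(count))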



Interesting Links