R: tidytext RPJP BAPPENAS
install.packages("rJava")
install.packages("xlsx")
install.packages("tm")
install.packages("wordcloud")
install.packages("ggplot2")
install.packages("RWeka")
library(xlsx)
library(tm)
library(wordcloud)
library(ggplot2)
library(tidyverse)
library(tidytext)
library(RWeka)

directory <- "data-pdf"

# create a corpus from the PDF files in the directory
docs <- VCorpus(DirSource(directory), readerControl = list(reader = readPDF))
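As a quick sanity check (a sketch, not part of the original listing), the corpus can be inspected before building the matrices; the number of documents simply depends on which PDFs you placed in data-pdf:

# number of PDF documents loaded into the corpus
length(docs)

# metadata and content preview of the first document
inspect(docs[[1]])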
# docs <- VCorpus(DirSource("data", recursive=TRUE))
# Get the document term matrices
# with stemming
#
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm_unigram <- DocumentTermMatrix(docs, control = list(tokenize="words",
removePunctuation = TRUE,
stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"),"tabel","pada","dan"),
stemming = TRUE))
dtm_bigram <- DocumentTermMatrix(docs, control = list(tokenize = BigramTokenizer,
removePunctuation = TRUE,
stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"),"tabel","pada","dan"),
stemming = TRUE))
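To make the Weka control above concrete, this short sketch shows what the bigram tokenizer returns; the sample phrase is only an illustration:

# bigrams produced from a four-word phrase
NGramTokenizer("pembangunan jangka panjang nasional", Weka_control(min = 2, max = 2))
# expected: "pembangunan jangka" "jangka panjang" "panjang nasional"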
# without stemming
# (these reuse the names dtm_unigram / dtm_bigram and so overwrite the stemmed matrices above;
#  rename them if both versions are needed)
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm_unigram <- DocumentTermMatrix(docs, control = list(tokenize="words",
removePunctuation = TRUE,
stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"),"tabel","pada","dan")))
dtm_bigram <- DocumentTermMatrix(docs, control = list(tokenize = BigramTokenizer,
removePunctuation = TRUE,
stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"),"tabel","pada","dan")))
inspect(dtm_unigram)
inspect(dtm_bigram)
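The wordcloud, ggplot2, and tidytext packages are loaded above but not used in the listing. A minimal sketch of how they could be applied to the unigram matrix follows; the word-count thresholds and plot choices are illustrative assumptions, not part of the original page:

# tidy the document-term matrix into one row per (document, term, count)
tidy_unigram <- tidy(dtm_unigram)

# total frequency of each term across all documents
term_freq <- tidy_unigram %>%
  group_by(term) %>%
  summarise(n = sum(count)) %>%
  arrange(desc(n))

# word cloud of the 100 most frequent terms
wordcloud(words = term_freq$term, freq = term_freq$n,
          max.words = 100, random.order = FALSE)

# bar chart of the 20 most frequent terms
term_freq %>%
  slice_max(n, n = 20) %>%
  ggplot(aes(x = reorder(term, n), y = n)) +
  geom_col() +
  coord_flip() +
  labs(x = NULL, y = "frequency")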