install.packages("rtweet") install.packages("revgeo") install.packages("tm") install.packages("tidytext") install.packages("igraph") install.packages("dplyr") install.packages("ggplot2") install.packages("tidyverse") install.packages("stringr") install.packages("scales") install.packages("furrr") install.packages("wydir") install.packages("ggraph") library(rtweet) library(revgeo) library(tm) library(tidytext) library(igraph) library(dplyr) library(ggplot2) library(tidyverse) library(stringr) library(knitr) library(scales) library(rtweet) ## search for 18000 tweets using the metoo hashtag rt <- search_tweets( "#MeToo", n = 18000, include_rts = FALSE ) ## search for 250,000 tweets containing the word data rt <- search_tweets( "data", n = 250000, retryonratelimit = TRUE ) ## search for 10,000 tweets sent from the US rt <- search_tweets( "MeToo", geocode = lookup_coords("usa"), n = 1000 ) ## create lat/lng variables using all available tweet and profile geo-location data rt <- lat_lng(rt) ## plot state boundaries par(mar = c(0, 0, 0, 0)) maps::map("state", lwd = .25) ## plot lat and lng points onto state map with(rt, points(lng, lat, pch = 20, cex = .75, col = rgb(0, .3, .7, .75))) ## random sample for 30 seconds (default) rt <- stream_tweets("") ## stream tweets for a week (60 secs x 60 mins * 24 hours * 7 days) Rt<-stream_tweets( "",timeout = 60 * 60 * 24 * 7,) ## get user IDs of accounts followed by Matteo Salvini Matteo_friends <- get_friends("@matteosalvinimi") ## lookup data on those accounts Matteo_friends_data <- lookup_users(Matteo_friends$user_id) ## get user IDs of accounts following Matteo Salvini Matteo_followers <- get_followers("@matteosalvinimi", n = 75000) ## lookup data on those accounts Matteo_followers_data <- lookup_users(Matteo_followers$user_id) tmls <- get_timelines(c("@GiorgiaMeloni", "@matteosalvinimi", "@luigidimaio","@nzingaretti","@emmabonino","@lauraboldrini","@matteorenzi"), n = 320) ##For writing your data in csv format attach (rt) #OR attach (tmls) tweetsc<-data.frame(user_id,screen_name,created_at,text,favorite_count,retweet_count,lang,location,followers_count,friends_count,lat,lng,name,listed_count,statuses_count) write.csv2(tweetsc,"filename.csv") -------------------------------------------------------------------------------------------------------------------------------------------- library(RedditExtractoR) ## Look for links containing the word Emergency links <- reddit_urls(search_terms = "Emergency", page_threshold = 2, cn_threshold= , subreddit =, regex_filter =, sort_by = ) ## Look for contents contained in the links content <- reddit_content(links$URL) ## Create a csv dataset write.cvs(content,"filename.csv") ## Look for content in one discussion content1 <- reddit_content(links$URL[1]) ## Plot the discussion's network graph <- construct_graph(content1, plot = TRUE) ## Plot the users' network user <- user_network(content, include_author = TRUE, agg = TRUE) user$plot