author     Giuseppe Bilotta <giuseppe.bilotta@gmail.com>  2006-10-24 15:24:13 +0000
committer  Giuseppe Bilotta <giuseppe.bilotta@gmail.com>  2006-10-24 15:24:13 +0000
commit     43ac960aa89e5a02291fe875627dac88ae7fda34 (patch)
tree       6d0cb13f4c299978174d72730c602cd274bbfdba /data/rbot/plugins
parent     77512a98814b8c8ae5e6314a8bdf30b1967d95d2 (diff)
Initial implementation of proper caching based on last-modified and etag HTTP headers
Diffstat (limited to 'data/rbot/plugins')
-rw-r--r--  data/rbot/plugins/demauro.rb    2
-rw-r--r--  data/rbot/plugins/digg.rb       2
-rw-r--r--  data/rbot/plugins/freshmeat.rb  2
-rw-r--r--  data/rbot/plugins/grouphug.rb   2
-rw-r--r--  data/rbot/plugins/quiz.rb       2
-rw-r--r--  data/rbot/plugins/rss.rb        2
-rw-r--r--  data/rbot/plugins/search.rb     2
-rw-r--r--  data/rbot/plugins/slashdot.rb   4
-rw-r--r--  data/rbot/plugins/tube.rb       4
-rw-r--r--  data/rbot/plugins/urban.rb      8
10 files changed, 15 insertions, 15 deletions
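
The httputil changes themselves are not part of this diff (it is limited to data/rbot/plugins), so the following is only a minimal sketch, in plain Ruby, of how a get_cached helper based on Last-Modified and ETag revalidation can work. The SimpleHttpCache class, its CacheEntry struct and the single-argument get_cached signature are illustrative assumptions, not rbot's actual HttpUtil API; the real method also accepts the extra timeout arguments visible in the rss.rb and tube.rb hunks below.

    require 'net/http'
    require 'uri'

    # Illustrative stand-in for rbot's HttpUtil; not the actual implementation.
    class SimpleHttpCache
      CacheEntry = Struct.new(:body, :last_modified, :etag)

      def initialize
        @cache = {}
      end

      # Fetch +url+ (String or URI), revalidating any cached copy with a
      # conditional request so unchanged pages are not transferred again.
      def get_cached(url)
        uri = url.is_a?(URI::Generic) ? url : URI.parse(url.to_s)
        entry = @cache[uri.to_s]

        request = Net::HTTP::Get.new(uri.request_uri)
        if entry
          request['If-Modified-Since'] = entry.last_modified if entry.last_modified
          request['If-None-Match'] = entry.etag if entry.etag
        end

        response = Net::HTTP.start(uri.host, uri.port) { |http| http.request(request) }

        case response
        when Net::HTTPNotModified
          entry.body                 # 304: the cached body is still valid
        when Net::HTTPSuccess
          @cache[uri.to_s] = CacheEntry.new(response.body,
                                            response['Last-Modified'],
                                            response['ETag'])
          response.body
        else
          nil                        # callers check for nil, as the plugins below do
        end
      end
    end

Returning nil on failure matches the calling convention the plugins already rely on: every hunk below simply swaps get for get_cached and keeps its existing nil check.
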
diff --git a/data/rbot/plugins/demauro.rb b/data/rbot/plugins/demauro.rb
index 436a0bd3..9f5fc218 100644
--- a/data/rbot/plugins/demauro.rb
+++ b/data/rbot/plugins/demauro.rb
@@ -16,7 +16,7 @@ class DeMauroPlugin < Plugin
def demauro(m, params)
parola = params[:parola].downcase
url = @wapurl + "index.php?lemma=#{URI.escape(parola)}"
- xml = @bot.httputil.get(url)
+ xml = @bot.httputil.get_cached(url)
if xml.nil?
info = @bot.httputil.last_response
info = info ? "(#{info.code} - #{info.message})" : ""
diff --git a/data/rbot/plugins/digg.rb b/data/rbot/plugins/digg.rb
index 8663bd0f..8e3aeb4e 100644
--- a/data/rbot/plugins/digg.rb
+++ b/data/rbot/plugins/digg.rb
@@ -13,7 +13,7 @@ class DiggPlugin < Plugin
def digg(m, params)
max = params[:limit].to_i
debug "max is #{max}"
- xml = @bot.httputil.get(URI.parse("http://digg.com/rss/index.xml"))
+ xml = @bot.httputil.get_cached(URI.parse("http://digg.com/rss/index.xml"))
unless xml
m.reply "digg news parse failed"
return
diff --git a/data/rbot/plugins/freshmeat.rb b/data/rbot/plugins/freshmeat.rb
index eb2dbdf7..c8f529cb 100644
--- a/data/rbot/plugins/freshmeat.rb
+++ b/data/rbot/plugins/freshmeat.rb
@@ -12,7 +12,7 @@ class FreshmeatPlugin < Plugin
search = params[:search].to_s
max = 8 if max > 8
begin
- xml = @bot.httputil.get(URI.parse("http://freshmeat.net/search-xml/?orderby=locate_projectname_full_DESC&q=#{URI.escape(search)}"))
+ xml = @bot.httputil.get_cached(URI.parse("http://freshmeat.net/search-xml/?orderby=locate_projectname_full_DESC&q=#{URI.escape(search)}"))
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "illegal search string #{search}"
return
diff --git a/data/rbot/plugins/grouphug.rb b/data/rbot/plugins/grouphug.rb
index aa3bf6d7..53fc7f0a 100644
--- a/data/rbot/plugins/grouphug.rb
+++ b/data/rbot/plugins/grouphug.rb
@@ -14,7 +14,7 @@ class GrouphugPlugin < Plugin
path = "/random"
path = "/confessions/#{m.params()}" if m.params()
begin
- data = bot.httputil.get(URI.parse("http://grouphug.us/#{path}"))
+ data = bot.httputil.get_cached(URI.parse("http://grouphug.us/#{path}"))
reg = Regexp.new( '(<td class="conf-text")(.*?)(<p>)(.*?)(</p>)', Regexp::MULTILINE )
confession = reg.match( data )[4]
diff --git a/data/rbot/plugins/quiz.rb b/data/rbot/plugins/quiz.rb
index 4cd26f15..629b7232 100644
--- a/data/rbot/plugins/quiz.rb
+++ b/data/rbot/plugins/quiz.rb
@@ -116,7 +116,7 @@ class QuizPlugin < Plugin
# Wiki data
begin
- serverdata = @bot.httputil.get( URI.parse( "http://amarok.kde.org/amarokwiki/index.php/Rbot_Quiz" ) )
+ serverdata = @bot.httputil.get_cached( URI.parse( "http://amarok.kde.org/amarokwiki/index.php/Rbot_Quiz" ) )
serverdata = serverdata.split( "QUIZ DATA START\n" )[1]
serverdata = serverdata.split( "\nQUIZ DATA END" )[0]
serverdata = serverdata.gsub( /&nbsp;/, " " ).gsub( /&amp;/, "&" ).gsub( /&quot;/, "\"" )
diff --git a/data/rbot/plugins/rss.rb b/data/rbot/plugins/rss.rb
index 192c079a..dcbccb8c 100644
--- a/data/rbot/plugins/rss.rb
+++ b/data/rbot/plugins/rss.rb
@@ -506,7 +506,7 @@ class RSSFeedsPlugin < Plugin
# Use 60 sec timeout, cause the default is too low
# Do not use get_cached for RSS until we have proper cache handling
# xml = @bot.httputil.get_cached(feed.url,60,60)
- xml = @bot.httputil.get(feed.url,60,60)
+ xml = @bot.httputil.get_cached(feed.url, 60, 60)
rescue URI::InvalidURIError, URI::BadURIError => e
report_problem("invalid rss feed #{feed.url}", e, m)
return
diff --git a/data/rbot/plugins/search.rb b/data/rbot/plugins/search.rb
index fd1aefdc..a498d47f 100644
--- a/data/rbot/plugins/search.rb
+++ b/data/rbot/plugins/search.rb
@@ -35,7 +35,7 @@ class SearchPlugin < Plugin
begin
- wml = @bot.httputil.get(url)
+ wml = @bot.httputil.get_cached(url)
rescue => e
m.reply "error googling for #{what}"
return
diff --git a/data/rbot/plugins/slashdot.rb b/data/rbot/plugins/slashdot.rb
index 30f58dc9..ef96a4b0 100644
--- a/data/rbot/plugins/slashdot.rb
+++ b/data/rbot/plugins/slashdot.rb
@@ -12,7 +12,7 @@ class SlashdotPlugin < Plugin
search = params[:search].to_s
begin
- xml = @bot.httputil.get(URI.parse("http://slashdot.org/search.pl?content_type=rss&query=#{URI.escape(search)}"))
+ xml = @bot.httputil.get_cached(URI.parse("http://slashdot.org/search.pl?content_type=rss&query=#{URI.escape(search)}"))
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "illegal search string #{search}"
return
@@ -53,7 +53,7 @@ class SlashdotPlugin < Plugin
debug params.inspect
max = params[:limit].to_i
debug "max is #{max}"
- xml = @bot.httputil.get(URI.parse("http://slashdot.org/slashdot.xml"))
+ xml = @bot.httputil.get_cached(URI.parse("http://slashdot.org/slashdot.xml"))
unless xml
m.reply "slashdot news parse failed"
return
diff --git a/data/rbot/plugins/tube.rb b/data/rbot/plugins/tube.rb
index 85316718..0a9feb2f 100644
--- a/data/rbot/plugins/tube.rb
+++ b/data/rbot/plugins/tube.rb
@@ -13,7 +13,7 @@ class TubePlugin < Plugin
def tube(m, params)
line = params[:line]
begin
- tube_page = @bot.httputil.get(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"), 1, 1)
+ tube_page = @bot.httputil.get_cached(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"), 1, 1)
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "Cannot contact Tube Service Status page"
return
@@ -42,7 +42,7 @@ class TubePlugin < Plugin
def check_stations(m, params)
begin
- tube_page = @bot.httputil.get(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"))
+ tube_page = @bot.httputil.get_cached(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"))
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "Cannot contact Tube Service Status page"
return
diff --git a/data/rbot/plugins/urban.rb b/data/rbot/plugins/urban.rb
index 95210b25..d2ea8645 100644
--- a/data/rbot/plugins/urban.rb
+++ b/data/rbot/plugins/urban.rb
@@ -18,7 +18,7 @@ class UrbanPlugin < Plugin
end
# we give a very high 'skip' because this will allow us to get the number of definitions by retrieving the previous definition
uri = URI.parse("http://www.urbanwap.com/search.php?term=#{URI.escape words}&skip=65536")
- page = @bot.httputil.get(uri)
+ page = @bot.httputil.get_cached(uri)
if page.nil?
m.reply "Couldn't retrieve an urban dictionary definition of #{words}"
return
@@ -39,7 +39,7 @@ class UrbanPlugin < Plugin
end
if n < numdefs
uri = URI.parse("http://www.urbanwap.com/search.php?term=#{URI.escape words}&skip=#{n-1}")
- page = @bot.httputil.get(uri)
+ page = @bot.httputil.get_cached(uri)
if page.nil?
case n % 10
when 1
@@ -77,7 +77,7 @@ class UrbanPlugin < Plugin
end
def uotd(m, params)
- home = @bot.httputil.get("http://www.urbanwap.com/")
+ home = @bot.httputil.get_cached("http://www.urbanwap.com/")
if home.nil?
m.reply "Couldn't get the urban dictionary word of the day"
return
@@ -85,7 +85,7 @@ class UrbanPlugin < Plugin
home.match(/Word of the Day: <a href="(.*?)">.*?<\/a>/)
wotd = $1
debug "Urban word of the day: #{wotd}"
- page = @bot.httputil.get(wotd)
+ page = @bot.httputil.get_cached(wotd)
if page.nil?
m.reply "Couldn't get the urban dictionary word of the day"
else