# Read the requested site name from the CGI query string.
# NOTE(review): $query is presumably a CGI object constructed earlier in
# the file -- its creation is not visible in this chunk; confirm.
$site = $query->param('site');

# Settings: the shared page fragments (navigation, title banner, footer)
# are always fetched from the live main site, so the same script works on
# any host.  This replaces an older scheme that probed local directory
# paths (/usr/local/proj/co600_10, /home/sites/www.i-scream.org.uk) to
# choose relative include paths -- which was also broken, because the
# `my` declarations were scoped inside the if/elsif blocks and the
# variables were empty again by the time they were used.
my ($left)   = "http://www.i-scream.org.uk/left.inc";
my ($title)  = "http://www.i-scream.org.uk/title.inc";
my ($bottom) = "http://www.i-scream.org.uk/bottom.inc";

# Emit the CGI header before any page content.
print "Content-type: text/html\n\n";
cvswww-extract.cgi
Script to extract webpages onto webserver
Created by tdb1 20/10/2000
Last modified 25/11/2000
-->
<html>
<head>
  <title>The i-Scream Project CVS Website Extracter</title>
</head>
<body bgcolor="#ffffff" link="#0000ff" alink="#3333cc" vlink="#3333cc" text="#000066">
<td valign="top">
EOF
# Insert the shared navigation fragment, fetched live from the main site.
# NOTE(review): backticks run wget via the shell; $left/$title/$bottom
# are constants set above, so no untrusted data reaches the command line.
print `wget -O - -q $left`;

print << "EOF";
<td valign="top">
EOF

# Shared title banner.
print `wget -O - -q $title`;

print "<pre>\n";
print "</pre>\n";

# Shared page footer.
print `wget -O - -q $bottom`;

# NOTE(review): this heredoc is empty in the post-image and prints
# nothing; closing markup may live in omitted context -- confirm.
print << "EOF";
EOF

exit 0;
-
# Print the contents of the named file to STDOUT.
#
# Replaces two defective variants: one shelled out to `cat $filename`
# (an extra process, and a shell-injection risk should the path ever
# come from user input), and one read the handle with a list assignment
# -- my ($line) = <FILE> -- which slurps every remaining line, keeps
# only the first, and so printed just one line of the file before the
# loop terminated on the now-exhausted handle.
sub print_file {
    my ($filename) = @_;

    # Three-arg open with a lexical handle; die with context on failure.
    open my $fh, '<', $filename or die "Cannot open $filename: $!\n";
    while (my $line = <$fh>) {
        print $line;
    }
    close $fh;
    return;
}

# Deprecated alias kept for backward compatibility with any old callers.
sub print_file_old {
    my ($filename) = @_;
    return print_file($filename);
}