Wordman/ProjectGezlak
- Back to Wordman/CommissionEngine
I'm starting Project Gezlak (named after a type of metal elemental that roams Autochthonia) to try to automatically fix the links that were broken during the process of converting to MediaWiki. This would probably be a lot easier with direct access to the server, but I don't have that.
The idea is to use the Python wikipedia robot framework to examine page content, detect certain kinds of damaged links, and repair them.
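To make the targets concrete, here are the kinds of breakage the repair routines below handle (shown as before → after; the page names are placeholders):
- [[[[link]]]] → [[link]] (doubled brackets)
- [[[page/sub]]'page]] → [[page/sub'page]] (an apostrophe in the link got scrambled)
- [[[link]] | label]] → [[link|label]] (a labeled link got split by stray brackets)
- [[A/B/C]] → [[A/C]] (a full subpage name was prepended instead of just its first level)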
The script has been tested on a handful of pages and seems to work; however, I can't rule out all mistakes. If you spot any, tell me about them in the following sections:
Errors
If you notice errors the script made, list them here.
- error
Code
#!/usr/bin/python
import wikipedia as pywikibot
import pagegenerators
import re
import time
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp
}
class AllPagesBot:
def __init__(self, generator, dry):
"""
Constructor. Parameters:
* generator - The page generator that determines on which pages
to work on.
* dry - If True, doesn't do any real changes, but only shows
what would have been changed.
"""
        # NOTE: the generator argument is ignored here; page titles are read
        # from a text file instead (this also leaves the main generator
        # unconsumed for the repair bot that runs afterwards).
        #self.generator = pagegenerators.AllpagesPageGenerator()
        self.generator = pagegenerators.TextfilePageGenerator('exaltedall.txt')
self.dry = dry
self.pages = {}
    def run(self):
        # Collect every existing page, keyed by its bare title, so the
        # repair bot can check whether a link target exists.
        for page in self.generator:
            aslink = page.aslink()
            aslink = aslink.replace('[[', '')
            aslink = aslink.replace(']]', '')
            self.pages[aslink] = page
#for link in self.pages.keys():
# print "\"%s\"," % (link)
class ExaltedRepairBot:
# Edit summary message that should be used.
# NOTE: Put a good description here, and add translations, if possible!
msg = {
'en': u'Testing script to fix links messed up in conversion'
}
def __init__(self, generator, dry, quiet, allpages):
"""
Constructor. Parameters:
* generator - The page generator that determines on which pages
to work on.
* dry - If True, doesn't do any real changes, but only shows
what would have been changed.
"""
self.allpages = allpages
self.generator = generator
self.dry = dry
self.quiet = quiet
# Set the edit summary message
self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
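        # linkPat finds a whole wiki link, e.g. [[Some/Page | label]];
        # splitPat separates the link target from its label on '|';
        # levelPat splits a target like A/B/C into its subpage levels.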
self.linkPat = re.compile(r'\[\[(.+?)\]\]')
self.splitPat = re.compile(r'\s*\|\s*')
self.levelPat = re.compile(r'\s*/\s*')
def run(self):
for page in self.generator:
try:
self.treat(page)
except Exception, err:
print " ERROR: Failed to process page %s when examining link %s: %s\n %s" % (page.aslink(), self.curLink, err, self.curLine)
def treat(self, page):
"""
Loads the given page, does some changes, and saves it.
"""
print ""
print "Processing page %s..." % (page.aslink())
try:
# Load the page
text = page.get()
except pywikibot.NoPage:
pywikibot.output(u"Page %s does not exist; skipping."
% page.aslink())
return
except pywikibot.IsRedirectPage:
pywikibot.output(u"Page %s is a redirect; skipping."
% page.aslink())
return
        # Walk the page line by line, repairing any damaged links found,
        # then reassemble the page text.
        lines = text.splitlines(True)
        newlines = []
for line in lines:
newline = self.treatLine(line)
newlines.append(newline)
text = "".join(newlines)
# only save if something was changed
if text != page.get():
if not self.quiet:
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% page.title())
# show what was changed
pywikibot.showDiff(page.get(), text)
if not self.dry:
if self.quiet:
choice = 'y'
else:
choice = pywikibot.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N')
if choice == 'y':
try:
# Save the page
page.put(text, comment=self.summary)
except pywikibot.LockedPage:
pywikibot.output(u"Page %s is locked; skipping."
% page.aslink())
except pywikibot.EditConflict:
pywikibot.output(
u'Skipping %s because of edit conflict'
% (page.title()))
except pywikibot.SpamfilterError, error:
pywikibot.output(u'Cannot change %s because of spam blacklist entry %s'
% (page.title(), error.url))
else:
print " ...no changes."
time.sleep(3) # Don't flood the server
def treatLine(self, line):
str_list = []
lastPos = 0
matches = self.linkPat.finditer(line)
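        # Rebuild the line piece by piece: text between links is copied
        # untouched and each repaired link is substituted in place. Note
        # that treatLink may consume extra source characters beyond the
        # regex match (for corrupted labels), so it returns a new end position.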
for match in matches:
fulllink = match.group(0)
link = match.group(1)
end = match.end(1)
try:
(newLink, end) = self.treatLink(line, link, end)
except Exception, err:
print " ERROR: Failed to process link %s: %s\n %s\n Continuing leaving link unaltered" % (self.curLink, err, self.curLine)
newLink = link
start = match.start(1)
str_list.append(line[lastPos:start])
str_list.append(newLink)
lastPos = end
if not self.quiet and link != newLink:
print " %s -> %s" % (link,newLink)
str_list.append(line[lastPos:])
return ''.join(str_list)
def treatLink(self, line, link, end):
self.curLink = link
self.curLine = line
sourceLast = end
theSplit = self.splitPat.split(link)
link = theSplit[0]
label = None
if len(theSplit) > 1:
label = theSplit[1]
        if not self.pageExists(link):
            (link, label, sourceLast) = self.repairMissingLink(line, link, label, sourceLast)
            #if self.pageExists(link):
            #    (link, label, sourceLast) = self.repairExistingLink(line, link, label, sourceLast)
            if not self.pageExists(link):
                print " Warning: after repairs, link %s still not found. Using anyway." % (link)
treated = link
if label:
treated += '|' + label
return (treated,sourceLast)
def pageExists(self, link):
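        # Normalize the link the way MediaWiki normalizes titles:
        # underscores become spaces and the first letter is uppercased.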
cleanLink = link.replace('_',' ')
cleanLink = cleanLink[0].capitalize() + cleanLink[1:]
return self.allpages.has_key(cleanLink)
def repairExistingLink(self, line, link, label, end):
# "Prettify links starting with 'wordman'
if link.startswith('Wordman') and not label:
levels = self.levelPat.split(link)
if len(levels) > 1:
mainpage = levels[0]
subpage = levels[1]
name = self.space_out_camel_case(subpage).strip()
label = name
return (link, label, end)
def repairMissingLink(self, line, link, label, end):
if link.startswith('['):
(link, label, end) = self.repairBadLabeledLink(line, link, label, end)
levels = self.levelPat.split(link)
if len(levels) > 2:
(link, label, end) = self.repairMultiLevelLink(line, link, label, end, levels)
return (link, label, end)
def repairBadLabeledLink(self, line, link, label, end):
# First, get rid of the leading brackets in the link
link = link.replace('[','')
        # Because of the pattern matching that got us this far, we know that the
        # characters at line[end] are two closing brackets. Skip past them, then
        # grab everything up to the next two closing brackets, which should be an
        # attempt at a label that got corrupted.
        assumption = line[end:end+2]
        if assumption != ']]':
            raise AssertionError("expected ']]' after the damaged link")
        end += 2
        remainder = line[end:]
        index = remainder.find(']]')
        if index == -1:
            raise AssertionError("no further ']]' found after the damaged link")
remainder = remainder[:index]
        if index < 2:
            # In this case, it looks like there might be a link like [[[[link]]]] or
            # [[[link]] ]]; advancing end past the inner ']]' above already drops
            # the extra brackets, so there is nothing more to do here.
            pass
elif remainder.startswith('\''):
# In this case, there is a link with an apostrophe that got scrambled,
# like [[[page/sub]]'page]]. Append the remainder to the link.
link += remainder
end += index
else:
# Probably a label, like [[[link]] | label]]
            newlabel = self.splitPat.split(remainder)[1]
            label = newlabel.strip()
# Tell the caller to ignore the extra line source we just consumed
end += index
return (link, label, end)
    def repairMultiLevelLink(self, line, link, label, end, levels):
        # In this case, the problem is usually that the full name of an a/b page
        # was prepended blindly, rather than just the a part. So, build a link
        # from the first and last levels, e.g. A/B/C becomes A/C.
        testlink = levels[0] + '/' + levels[-1]
        # If this link exists, use it
        if self.pageExists(testlink):
            link = testlink
        return (link, label, end)
    def space_out_camel_case(self, s, delimiter=' '):
"""Adds spaces to a camel case string. Failure to space out string returns the original string.
>>> space_out_camel_case('DMLSServicesOtherBSTextLLC')
'DMLS Services Other BS Text LLC'
"""
        text = re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z]))', delimiter, s).strip()
        if text != s:
# Uncapitalize articles
text = text.replace(' Of ', ' of ')
text = text.replace(' In ', ' in ')
text = text.replace(' To ', ' to ')
text = text.replace(' At ', ' at ')
text = text.replace(' The ', ' the ')
text = text.replace(' And ', ' and ')
return text
def main():
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine which pages
    # to work on.
genFactory = pagegenerators.GeneratorFactory()
# The generator gives the pages that should be worked upon.
gen = None
# This temporary array is used to read the page title if one single
# page to work on is specified by the arguments.
pageTitleParts = []
    # If dry is True, doesn't make any real changes, but only shows
    # what would have been changed.
dry = False
quiet = False
# Parse command line arguments
for arg in pywikibot.handleArgs():
if arg.startswith("-dry"):
dry = True
elif arg.startswith("-quiet"):
quiet = True
else:
# check if a standard argument like
# -start:XYZ or -ref:Asdf was given.
if not genFactory.handleArg(arg):
pageTitleParts.append(arg)
if pageTitleParts != []:
# We will only work on a single page.
pageTitle = ' '.join(pageTitleParts)
page = pywikibot.Page(pywikibot.getSite(), pageTitle)
gen = iter([page])
if not gen:
gen = genFactory.getCombinedGenerator()
if gen:
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
gen = pagegenerators.PreloadingGenerator(gen)
allbot = AllPagesBot(gen, dry)
allbot.run()
bot = ExaltedRepairBot(gen, dry, quiet, allbot.pages)
bot.run()
else:
pywikibot.showHelp()
if __name__ == "__main__":
try:
main()
finally:
pywikibot.stopme()
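The script is meant to live in the pywikipedia framework directory and run like any other bot script. A hypothetical invocation (gezlak.py is just a placeholder name; -dry previews changes without saving, -quiet saves without prompting, and the standard page generator arguments like -start:XYZ also work):
python gezlak.py -dry Wordman/ProjectGezlak
It also expects a file named exaltedall.txt listing every page on the wiki, which is what the link existence checks use.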
Comments
Great! I look forward to seeing the results. Thank you! :) nikink
Screw it. Script running now. I probably could have done this better as an export/alter/import, but the hell with it. - Wordman
Seems to have worked a treat, as far as I can tell on my pages anyway. :) Thanks again! Saved me a lot of work! nikink