Versions compared

...

Code block
from atlassian import Confluence
import convert
import requests
from requests.auth import HTTPBasicAuth
from xml.sax.saxutils import escape
import os
import config
from slugify import slugify

confluence = Confluence(
    url=config.conflunce_url,
    username=config.username,
    password=config.password)

def make_slug(title):

    slug = slugify(title)
    return slug

def when_to_xwiki_datetime(when):

    # Incoming from Confluence REST: 2011-09-06T12:03:35.000+02:00
    # Return format: 2025-09-28 10:00:00

    return when.replace("T"," ").replace(".000+02:00","")
    
def create_xwki_page(confluence_page_id,title,parent_url,xwiki_path,content,when,page_type):

   xml cont_bool = "<page xmlns=\"http://www.xwiki.org\">" False
    slug = make_slug(title)
    xmlurl = xmlparent_url + "<title>/spaces/" + titleslug + "</pages/title>WebHome"

    (result, xmllog) = xmlconvert.convert(xwiki_path + "<syntax>xwiki/2.1</syntax>"
   + slug,content)
   
    xml = "<page xmlns=\"http://www.xwiki.org\">"
    xml = xml + "<content><title>" +  escape(content)title + "</content></page>title>"

    headersxml = {xml + "<syntax>xwiki/2.1</syntax>"
    xml = xml + "Content-Type<content>":  +  escape(result) + "</content></page>"

    headers = {
        "Content-Type": "application/xml"
    }

    response = requests.put(url, data=xml, headers=headers, auth=HTTPBasicAuth(config.username,config.password))

    #Check the response
    if response.status_code == 201:
        print("Page " + title + " created (" + page_type + ")")
        cont_bool = True
    else:
        if response.status_code == 202:
            print("Page " + title + " updated (" + page_type + ")")
            cont_bool = True
        else:
            print("Status code:", response.status_code)
            print("Response:", response.text)


    if cont_bool == True:
    
        #Labels
        #labels = confluence.get_page_labels(confluence_page_id, prefix=None, start=None, limit=None)
        #for label in labels["results"]:
        #    attach_label_to_page(parent_url + "/spaces/" + slug + "/pages/WebHome",label["name"])

        #Attachments
        directory#TO = "attachments/" + confluenceDO: https://www.mos-eisley.dk/spaces/it/pages/57245722/Commodore+C64 - more than 50 attachments... only 50 downloaded
        #Gitgus issue: https://github.com/atlassian-api/atlassian-python-api/issues/1590
        directory = "attachments/" + confluence_page_id
        os.makedirs(directory, exist_ok=True)

        attachments = confluence.download_attachments_from_page(confluence_page_id, path=directory)
   
        with os.scandir(directory) as entries:
            for entry in entries:
                if entry.is_file():
                    attach_file_to_page(parent_url + "/spaces/" + slug + "/pages/WebHome",entry.path,entry.name)

    if page_type == "Page":

        #Children
        children = confluence.get_page_child_by_type(confluence_page_id, type='page', start=None, limit=None, expand=None)
        for page in children:

            page_id = page["id"]

            page1 = confluence.get_page_by_id(page_id=page_id,expand="body.storage,version")
            content = page1["body"]["storage"]["value"]
            when = page1["version"]["when"]
            title = page['title']

            print ("Preparing: " + title)

            xwiki_page = create_xwki_page(page_id,title,parent_url + "/spaces/" + slug,xwiki_path + "." + slug,content,when,"Page")

    if page_type == "Blogpost":

        change_to_blog_post(url,slug,when)
        

def change_to_blog_post(url,slug,when):

    url = url + "/attachments/" + filenameobjects"

    headersxml = {"<object xmlns=\"http://www.xwiki.org\">"
    xml = xml + "Content-Type": "application/xml",<className>Blog.BlogPostClass</className>"
    xml = xml + "XWiki-AttachmentComment": "Uploaded by migrator"<properties><property>"
    }
xml =   
    with open(file_path, "rb") as f:xml + "<name>publishDate</name>"
    xml = xml + #files = {"file<value>": f}  # + when_to_xwiki_datetime(when) + "</value>"
    xml = xml  response = requests.put(url, data=f, headers=headers, auth=HTTPBasicAuth(config.username,config.password))+ "</property><property>"
    xml = xml + "<name>category</name><value>General</value></property></properties></object>"

    #headers Check= the{
 response
    if response.status_code == 201: "Content-Type": "application/xml"
    }

    print("Attachment " + filename + " created")
response = requests.post(url, data=xml, headers=headers, auth=HTTPBasicAuth(config.username,config.password))

    else:
#Check the   response
    if response.status_code == 202201:
            print("AttachmentBlog " + filenameslug + " updatedcreated" + url)
        else:cont_bool = True
    else:
        print("Status code:", if response.status_code) == 202:
            print("Response:", response.text)

def attach_labels_to_page(wiki_space,slug,labels):

Page " + title + " updated")
            cont_bool = True
    url = config.xwiki_url + "/rest/wikis/xwiki/spaces/" + wiki_space + "/pages/" + slug + "/tags"

 else:
            headers = {print("Status code:", response.status_code)
        "Content-Type": "application/xml",    print("Response:", response.text)    
    }
def attach_file_to_page(url,file_path,filename):

    xmlurl = url "<tags+ xmlns=\"http:/attachments/www.xwiki.org\">"" + filename

    forheaders label= in{
 labels:
       "Content-Type": "application/xml",
        xml = xml + "<tag>" + label + "</tag>"

    xml = xml + "</tag>"

    response = requests.put"XWiki-AttachmentComment": "Uploaded by migrator"
    }
    
    with open(file_path, "rb") as f:
        #files = {"file": f}  # 
        response = requests.put(url, data=f, headers=headers, auth=HTTPBasicAuth(config.username,config.password))

    # Check the response
    if response.status_code == 201:
        print("PageAttachment " + titlefilename + " created")
    else:
        if response.status_code == 202:
            print("PageAttachment " + titlefilename + " updated")
        else:
            print("Status code:", response.status_code)
            print("Response:", response.text)

def attach_label_to_page(url,label):

    url = url + "/objects"

    headers = {
        "Content-Type": "application/xml",
    }

    xml = "<object xmlns=\"http://www.xwiki.org\">"
    xml = xml + "<className>XWiki.TagClass</className>"
    xml = xml + "<property name=\"tags\">"
    xml = xml + "<value>" + label + "</value>"
    xml = xml + " </property></object>"

    response = requests.post(url, data=xml, headers=headers, auth=HTTPBasicAuth(config.username,config.password))

    # Check the response
    if response.status_code == 201:
        print("Page " + title + " created")
    else:
        if response.status_code == 202:
            print("Page " + title + " updated")
        else:
            print("Status code:", response.status_code)
            print("Response:", response.text)


#Start on the XWiki
wiki_space = "Main" # XWiki Home
#wiki_space = "Sandbox" # XWki Sandbox

#Space to migrate in Confluence - Capital and lower case can be tricky
#Pages on the same level as the Home page are not migrated, only the tree structure from Home
space_key = "familieblog"

space = confluence.get_space(space_key, expand='description.plain,homepage')
homepage_id=space['homepage']['id']

# Blogposts
blog_posts = confluence.get_all_pages_from_space_as_generator(space_key, start=0, limit=1000, status=None, expand=None, content_type='blogpost')
for blog in blog_posts:

    blog_id=blog["id"]
    prop = confluence.get_page_by_id(blog_id,expand="body.storage,version")
    content = prop["body"]["storage"]["value"]
    when = prop["version"]["when"]
    title = prop['title']
    create_xwki_page(homepage_id,title, config.xwiki_url + "/rest/wikis/xwiki/spaces/Blog",wiki_space,content,when,"Blogpost")

exit(0)

prop = confluence.get_page_by_id(homepage_id,expand="body.storage,version")
content = prop["body"]["storage"]["value"]
when = prop["version"]["when"]
title = prop['title']

xwiki_page = create_xwki_page(homepage_id,title, config.xwiki_url + "/rest/wikis/xwiki/spaces/" + wiki_space,wiki_space,content,when,"Page")
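
The script above reads its connection details from a local config module. Below is a minimal sketch of that module, assuming only the attributes the script actually uses (conflunce_url, xwiki_url, username, password, spelled exactly as the script reads them); all values are placeholders:

# config.py - hypothetical example, replace the placeholder values
conflunce_url = "https://confluence.example.org"   # base URL of the Confluence instance (attribute name as used by the script)
xwiki_url = "https://xwiki.example.org/xwiki"      # base URL of the XWiki instance; the script appends /rest/wikis/...
username = "migrator"                              # account used for both the Confluence and XWiki REST calls
password = "secret"                                # or an API token, if your instances accept one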


Code block
# https://github.com/faktorzehn/confluence2xwiki/blob/master/convert.py

from bs4 import BeautifulSoup
import bs4
from slugify import slugify

def make_slug(title):

    slug = slugify(title)
    return slug

log_string = ""

def log(msg):
	global log_string
	log_string += msg + "\n"

def parse_li(xwiki_path,element):
	return parse(xwiki_path,element.contents)

def parse_br(xwiki_path,element):
	return "\n"

def parse_em(xwiki_path,element): 
	return "//" + parse(xwiki_path,element.contents) + "//"

def parse_h1(xwiki_path,element):
	return "\n" + "= " + parse(xwiki_path,element.contents) + " =\n"

def parse_h2(xwiki_path,element):
	return "\n" + "== " + parse(xwiki_path,element.contents) + " =="

def parse_h3(xwiki_path,element):
	return "\n" + "=== " + parse(xwiki_path,element.contents) + " ==="
	
def parse_h4(xwiki_path,element):
	return "\n" + "==== " + parse(xwiki_path,element.contents) + " ===="

def parse_h5(xwiki_path,element):
	return "\n" + "===== " + parse(xwiki_path,element.contents) + " ====="

def parse_h6(xwiki_path,element):
	return "\n" + "====== " + parse(xwiki_path,element.contents) + " ======"

def parse_strong(xwiki_path,element): #bold
	return "**" + parse(xwiki_path,element.contents) + "**"
	
def parse_div(xwiki_path,element):
	return parse(xwiki_path,element.contents)

def parse_p(xwiki_path,element):
	return "\n" + parse(xwiki_path,element.contents).strip() + "\n"
	
def parse_u(xwiki_path,element):
	return "__" + parse(xwiki_path,element.contents).strip() + "__"
	
def parse_span(xwiki_path,element):
	return parse(xwiki_path,element.contents)
	
def parse_a(xwiki_path,element):
	if hasattr(element, "attrs"):
		if "href" in element.attrs:
			if parse(xwiki_path,element.contents) == element.attrs["href"]:
				return "[[" + element.attrs["href"] + "]]"
			else:
				return "[[" + parse(xwiki_path,element.contents) + ">>" + element.attrs["href"] + "]]"
		
		if "name" in element.attrs:
			log("Skipping anchor " + element.attrs["name"])
			return ""
			
		else:
			#raise Exception("Missing attributes")
			return ""
	else:
		raise Exception("Missing attributes 2")
	
def parse_ac_emoticon(xwiki_path,element):
	emoticons = {"smile": ":)", "wink": ";)", "yellow-star": "(*)", "tick": "(/)", "warning": "(!)", "question": "(?)"}
	
	name = element.attrs["ac:name"]
	
	if name in emoticons:
		return emoticons[name]
	else:
		log("Unkown emoticon " + name)
		return "(" + name + ")"


def parse_table(xwiki_path,element):
	data = []
	
	table_body = element.find('tbody')
	
	text = "\n"
	
	for row in table_body.find_all('tr'):		
		for col in row.contents:
			if col.name == "th" or col.name == "td":
				span = 1 if "colspan" not in col.attrs else int(col.attrs["colspan"])
				seperator = "|= " if col.name == "th" else "| "
				
				for i in range (0, span):
					if i == 0:
						text += seperator + parse(xwiki_path,col.contents).strip().replace("\n\n", "\n")
					else:
						text += seperator
			
		text += "\n"
		
	return(text)

def create_list(xwiki_path,element, syntax):
	text = ""	
	rows = element.find_all("li")
	
	for row in rows:
		text += "\n" + syntax + " " + parse(xwiki_path,row).strip()
		
	return text
	
def parse_dl(xwiki_path,element):
	return "\n"
	
def parse_ol(xwiki_path,element): #numbered list
	return create_list(xwiki_path,element, "1.")
	
def parse_ul(xwiki_path,element): #bulletpoints
	return create_list(xwiki_path,element, "* ")
	
def parse_code(xwiki_path,element):
	return "##" + parse(xwiki_path,element.contents) + "##"
	
def parse_pre(xwiki_path,element):
	return parse_code(xwiki_path,element)
	
def create_link(label, destination):
	if label == destination:
		return "[[" + destination + "]]"
	else:
		return "[[" + label + ">>" + destination + "]]"

def parse_ac_link(xwiki_path,element): #link
	label = destination = ""

	for link in element.contents:
		if link.name == "ri:page":
			label = destination = link.attrs["ri:content-title"]
		elif link.name == "ri:space":
			label = destination = link.attrs["ri:space-key"]
		
		elif link.name == "ac:plain-text-link-body":
			label = link.text
		
		elif type(link) == bs4.element.NavigableString and str(link) == "\n":
			continue
		elif link.name == "contentbylabel":
			continue
		
		else:
			log("Ignoring link of type " + link.name)
			return ""
	
	return create_link(label, destination)

def parse_ac_image(xwiki_path,element):
	#NPN Fix
	if element.find("ri:attachment") != None :
		return "[[image:" + element.find("ri:attachment").attrs["ri:filename"] + "]]"
	else:
		return ""

def parse_ac_structured_macro(xwiki_path,element):
	if element.attrs["ac:name"] == "include":
		return "{{include reference=\"" + xwiki_path + "." + make_slug(element.find("ri:page").attrs["ri:content-title"]) + ".WebHome\"/}}"
	
	elif element.attrs["ac:name"] == "excerpt-include":
		return create_link("include", element.find("ri:page").attrs["ri:content-title"])
	
	elif element.attrs["ac:name"] == "me-image":
		displaysize = "800"  # default width; overridden below if a displaysize parameter is present
		for parameter in element.findAll("ac:parameter"):
			acname = parameter.attrs["ac:name"]
			if acname == "image":
				image = parameter.text
			if acname == "path":
				path = parameter.text
			if acname == "group":
				group = parameter.text
			if acname == "thumbsize":
				thumbsize = parameter.text
			if acname == "displaysize":
				displaysize = parameter.text
		return "[[image:https://www.server.dk/data/" + path + "/" + image + "||data-xwiki-image-style-alignment=\"center\" data-xwiki-image-style-border=\"true\" width=\"" + displaysize + "\"]]"
	
	elif element.attrs["ac:name"] == "gallery":
		#https://extensions.xwiki.org/xwiki/bin/view/Extension/AttachmentGalleryMacro#Attachments
		return "{{velocity}}\n#set ($attachments = $doc.attachmentList)\n#if ($attachments.size() > 0)\n{{gallery}}\n#foreach($attachment in $attachments)\n#if($attachment.isImage())\n[[image:$attachment.filename]]\#end\n#end\n{{/gallery}}#end\n{{/velocity}}"
	
	elif element.attrs["ac:name"] == "brickset":
		for parameter in element.findAll("ac:parameter"):
			acname = parameter.attrs["ac:name"]
			if acname == "LID":
				lid = parameter.text
			if acname == "Name":
				bname = parameter.text

		return "== " + bname + " == [[image:http://images.brickset.com/sets/images/" + lid + "-1.jpg]]\n"
	
	elif element.attrs["ac:name"] == "code":
		language = element.find("ac:parameter")
		body = element.find("ac:plain-text-body").text
		if language == None:
			return "\n\n{{code}}\n" + body + "\n{{/code}}\n"
		else:
			return "\n\n{{code language=\"" + language.text + "\"}}\n" + body + "\n{{/code}}\n"
	
	elif element.attrs["ac:name"] == "excerpt":
		log("Removing excerpt")
		return parse(xwiki_path,element.find("ac:rich-text-body").contents)
	
	elif element.attrs["ac:name"] == "toc":
		return("{{toc/}}")
	
	elif element.attrs["ac:name"] == "panel":
		return parse(xwiki_path,element.find("ac:rich-text-body").contents)
	
	elif element.attrs["ac:name"] == "details":
		return parse(xwiki_path,element.find("ac:rich-text-body").contents)
	
	elif element.attrs["ac:name"] == "warning":
		return "{{error}}" + parse(xwiki_path,element.find("ac:rich-text-body").contents).strip() + "{{/error}}\n\n"
	elif element.attrs["ac:name"] == "note":
		return "{{warning}}" + parse(xwiki_path,element.find("ac:rich-text-body").contents).strip() + "{{/warning}}\n\n"
	elif element.attrs["ac:name"] == "info":
		return "{{info}}" + parse(xwiki_path,element.find("ac:rich-text-body").contents).strip() + "{{/info}}\n\n"
	elif element.attrs["ac:name"] == "tip":
		return "{{success}}" + parse(xwiki_path,element.find("ac:rich-text-body").contents).strip() + "{{/success}}\n\n"
	
	
	else:
		log("Ignoring structured macro of type " + element.attrs["ac:name"])
		return ""
	
def parse_ac_macro(xwiki_path,element):
	return parse_ac_structured_macro(xwiki_path,element)

def parse_ac_layout(xwiki_path,element): #column layout
	text = ""
	
	sections = element.findAll("ac:layout-section")	
	for section in sections:
		if(section.attrs["ac:type"] == "single"):
			text += parse(xwiki_path,section.find("ac:layout-cell").contents).strip() + "\n"
		else:		
			text += "{{container layoutStyle=\"columns\"}}\n"			
			cells = section.findAll("ac:layout-cell")
			for cell in cells:
				text += "(((" + parse(xwiki_path,cell.contents).strip() + ")))\n"
			
			text += "{{/container}}\n"
			
	return text

def parse_ac_task_list(xwiki_path,element):
	text = "\n"
	
	for entry in element.findAll("ac:task", recursive = False):
		text +=  "* " + parse(entry.find("ac:task-body").contents).strip() + "\n"
	
	return text
	
	
def parse(xwiki_path,element):
	if isinstance(element, list):
		buffer = ""
		for child in element:
			buffer = buffer + parse(xwiki_path,child)
		return buffer
	
	if type(element) == bs4.element.NavigableString:
		if str(element) == "\n":
			return ""
		else:
			return str(element).replace("--", "~-~-")
	
	ignored = ["ac_placeholder"]
	name = str(element.name).replace("-", "_").replace(":", "_")
	
	if name in ignored:
		return ""
		
	# search for matching parse function and call it
	elif "parse_" + name in globals():
		return globals()["parse_" + name](xwiki_path,element)
	
	else:
		log("unknown: " + name)
		return ""

def convert(xwiki_path,text):
	global log_string
	soup = BeautifulSoup(text, "html.parser")
	contents = soup.contents
	while "\n" in contents: contents.remove("\n")
	return (parse(xwiki_path,contents).strip(), log_string.strip())
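
For a quick sanity check of the converter above, here is a minimal stand-alone usage sketch; the sample markup and the output described in the comments are illustrative only:

# Hypothetical usage of convert.convert() on a small piece of Confluence storage format
from convert import convert

sample = "<h1>Hello</h1><p>See <strong>bold</strong> text.</p>"
result, log = convert("Main.sandbox", sample)

print(result)  # roughly: "= Hello =" followed by "See **bold** text."
print(log)     # conversion warnings (unknown tags, skipped macros); empty for this sample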