# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
from http.cookiejar import CookieJar
from urllib.request import urlopen
#from bs4 import BeautifulSoup
import urllib
import json
from socket import error as SocketError
import html.parser
# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys
# Json for the dictionary
import json
import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/CSV/corretti/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/RDF/'
# Custom class to store URIs + related infos for the ontologies/repositories
class RDFcoords:
def __init__(self, uri, prefix, code=None):
self.uri = uri
self.prefix = prefix
self.code = code
# Repositories
museoCoords = RDFcoords('', 'mpp:')
autCoords = RDFcoords('', 'aut:')
cidocCoords = RDFcoords('', 'crm:')
aatCoords = RDFcoords('', 'aat:')
nsCoords = RDFcoords('', 'rdf:')
xsdCoords = RDFcoords('', 'xsd:')
iconCoords = RDFcoords('', 'ico:')
documentsCoords = RDFcoords('', 'ds:')
schemaCoords = RDFcoords('', 'schema:')
rdfCoords = RDFcoords('', 'rdf:')
hasNoteCoords = RDFcoords('', 'no:')
rdfsCoords = RDFcoords('', 'rdfs:')
# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
line = subject + ' ' + predicate + ' ' + object1
return line
def doublet(predicate, object1):
line = ' ' + predicate + ' ' + object1
return line
def singlet(object1):
line = ' ' + object1
return line
# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
def writeTTLHeader(output):
output.write('@prefix ' + museoCoords.prefix + ' ' + museoCoords.uri + closeLine)
output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
output.write('@prefix ' + aatCoords.prefix + ' ' + aatCoords.uri + closeLine)
output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
output.write('@prefix ' + autCoords.prefix + ' ' + autCoords.uri + closeLine)
output.write('@prefix ' + xsdCoords.prefix + ' ' + xsdCoords.uri + closeLine)
output.write('@prefix ' + iconCoords.prefix + ' ' + iconCoords.uri + closeLine)
output.write('@prefix ' + documentsCoords.prefix + ' ' + documentsCoords.uri + closeLine)
output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
output.write('@prefix ' + hasNoteCoords.prefix + ' ' + hasNoteCoords.uri + closeLine)
output.write('@prefix ' + rdfsCoords.prefix + ' ' + rdfsCoords.uri + closeLine)
output.write('\n')
filePrefix = 'AR20IMR'
fileType = '_Ospedale'
max_entries = 1000000000
with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
export_dir + filePrefix + fileType + '_LICENZA.ttl', 'w') as output:
reader = csv.DictReader(csv_file)
writeTTLHeader(output)
first = True
ii = 0
for row in reader:
# The index ii is used to process a limited number of entries for testing purposes
ii = ii + 1
# columnName = list(row)
url = row['URL']
# placeHolders
# if row['FTA0']!= '':
# datplaceHolder = museoCoords.prefix + url
id = row['FTA0'].replace(".jpeg","")
e36placeHolder = museoCoords.prefix + url + "_" + id
# e36e42placeHolder = museoCoords.prefix + url + "_" + id + "_E36_E42"
# line = triple(e36placeHolder, cidocCoords.prefix + 'P138_represents', datplaceHolder) + closeLine
# output.write(line)
# line = triple(e36placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E36_Visual_Item') + closeLine
# output.write(line)
# line = triple(e36placeHolder, rdfsCoords.prefix + 'label', '\"Tavola\"') + closeLine
# output.write(line)
# line = triple(e36placeHolder, cidocCoords.prefix + 'P2_has_type', '\"Tavola\"') + closeLine
# output.write(line)
# line = triple(e36placeHolder, cidocCoords.prefix + 'P1_is_identified_by', e36e42placeHolder) + closeLine
# output.write(line)
# line = triple(e36e42placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E42_Identifier') + closeLine
# output.write(line)
# line = triple(e36e42placeHolder, rdfsCoords.prefix + 'label', '\"'+row['FTA0']+ '\"') + closeLine
# output.write(line)
# if row['NOTE']!= '':
# line = triple(e36placeHolder, hasNoteCoords.prefix, '\"'+ row['NOTE']+'"') + closeLine
# output.write(line)
e62placeHolder = museoCoords.prefix + url + "_" + id + "_E36_E62"
e62e42placeHolder = museoCoords.prefix + url + "_" + id + "_E62_E42"
e36e30placeHolder = ""
e36e30e42placeHolder = ""
line = triple(e36placeHolder, cidocCoords.prefix + 'P3_has_note',
e62placeHolder) + closeLine
output.write(line)
line = triple(e62placeHolder, nsCoords.prefix + 'type',
cidocCoords.prefix + 'E62_String') + closeLine
output.write(line)
line = triple(e62placeHolder, rdfsCoords.prefix + 'label',
'\"Museo di Palazzo Pretorio di Prato\"') + closeLine
output.write(line)
line = triple(e62placeHolder, cidocCoords.prefix + 'P2_has_type',
'\"Provenienza immagine\"') + closeLine
output.write(line)
line = triple(e62placeHolder, cidocCoords.prefix + 'P1_is_identified_by',
"") + closeLine
output.write(line)
line = triple(e62e42placeHolder, nsCoords.prefix + 'type',
cidocCoords.prefix + 'E42_Identifier') + closeLine
output.write(line)
line = triple(e36placeHolder, cidocCoords.prefix + 'P104_is_subject_to',
e36e30placeHolder) + closeLine
output.write(line)
line = triple(e36e30placeHolder, nsCoords.prefix + 'type',
cidocCoords.prefix + 'E30_Right') + closeLine
output.write(line)
line = triple(e36e30placeHolder, rdfsCoords.prefix + 'label',
'\"CC BY NC SA\"') + closeLine
output.write(line)
line = triple(e36e30placeHolder, cidocCoords.prefix + 'P1_is_identified_by',
e36e30e42placeHolder) + closeLine
output.write(line)
line = triple(e36e30e42placeHolder, nsCoords.prefix + 'type',
cidocCoords.prefix + 'E42_Identifier') + closeLine
output.write(line)
line = triple(e36e30e42placeHolder, rdfsCoords.prefix + 'label',
'\"CC BY NC SA\"') + closeLine
output.write(line)
output.write('\n')
#
#
# Limit number of entries processed (if desired)
if (ii > max_entries):
break