# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
import json
# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys
import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/CSV/corretti/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/RDF/'
# Custom class to store a URI and its related info (prefix, optional code) for the ontologies/repositories
class RDFcoords:
def __init__(self, uri, prefix, code=None):
self.uri = uri
self.prefix = prefix
self.code = code
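# Example (hypothetical URI): cidocCoords below could be built as
#   RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')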
# Repositories
museoCoords = RDFcoords('', 'mpp:')
autCoords = RDFcoords('', 'aut:')
cidocCoords = RDFcoords('', 'crm:')
aatCoords = RDFcoords('', 'aat:')
nsCoords = RDFcoords('', 'rdf:')
schemaCoords = RDFcoords('', 'rdfs:')
# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
line = subject + ' ' + predicate + ' ' + object1
return line
def doublet(predicate, object1):
line = ' ' + predicate + ' ' + object1
return line
def singlet(object1):
line = ' ' + object1
return line
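# Example (hypothetical terms):
#   triple('mpp:_RossiMario_pittore', 'rdf:type', 'crm:E21_Person')
#   returns 'mpp:_RossiMario_pittore rdf:type crm:E21_Person'
# doublet() and singlet() build the same kind of line without the subject
# (and predicate), for use after a ';' or ',' continuation ending.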
# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
def writeTTLHeader(output):
output.write('@prefix ' + museoCoords.prefix + ' ' + museoCoords.uri + closeLine)
output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
output.write('@prefix ' + aatCoords.prefix + ' ' + aatCoords.uri + closeLine)
output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
output.write('@prefix ' + autCoords.prefix + ' ' + autCoords.uri + closeLine)
output.write('\n')
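# Each call above emits one Turtle prefix declaration, e.g. '@prefix crm: <...> .'
# Note: the namespace URIs in the RDFcoords instances above are currently empty,
# so they need to be filled in for the generated header to be valid Turtle.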
filePrefix = 'AR20AUT_'
fileType = 'Ospedale'
max_entries = 1000000000
def get_role(role):
    # Look up the AAT identifier for a role label in the AAT_RUOLI.csv mapping
    with open('/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/CSV/AAT_RUOLI.csv', newline="") as role_file:
        reader = csv.DictReader(role_file)
        for row in reader:
            if row['Label'] == role:
                return row['AAT']
    # Explicitly signal that no matching label was found
    return None
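# Example (hypothetical label): get_role('pittore') returns the 'AAT' value of the
# row whose 'Label' column equals 'pittore', or None when no row matches.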
with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
export_dir + filePrefix + fileType + '_roles.ttl', 'w') as output:
reader = csv.DictReader(csv_file)
writeTTLHeader(output)
first = True
ii = 0
for row in reader:
# The index ii is used to process a limited number of entries for testing purposes
ii = ii + 1
url = row['URL']
# placeHolders
datplaceHolder = museoCoords.prefix + url
# E12 - PC14 - E21
if row['AUTH'] != '':
aut_role = ''
if row['AUTQ'] != '':
aut_role = row['AUTQ']
ll = row['AUTN'] + '_' + aut_role
lab = ll.replace(' ', '')
label = lab.replace(',', '')
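                # e.g. (hypothetical record): AUTN 'Rossi, Mario' with AUTQ 'pittore'
                # gives ll = 'Rossi, Mario_pittore' and label = 'RossiMario_pittore'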
                line = triple(museoCoords.prefix + '_' + label,
                              schemaCoords.prefix + 'label',
                              '\"' + row['AUTN'] + ' nel ruolo di ' + aut_role + '\"') + closeLine
output.write(line)
else:
aut_role = 'artista'
ll = row['AUTN'] + '_' + 'artista'
lab = ll.replace(' ', '')
label = lab.replace(',', '')
line = triple(museoCoords.prefix + '_' + label,
schemaCoords.prefix + 'label',
'\"' + row['AUTN'] + ' nel ruolo di artista' + '\"') + closeLine
output.write(line)
AuthorPlaceholder = autCoords.prefix + row['URL']
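            # CIDOC CRM property-class pattern: the labelled node is typed as
            # PC14_carried_out_by, P02_has_range points to the actor (aut: URI),
            # and P14.1_in_the_role_of attaches the role type written below.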
line = triple(museoCoords.prefix + '_' + label,
nsCoords.prefix + 'type',
cidocCoords.prefix + 'PC14_carried_out_by') + closeLine
output.write(line)
line = triple(museoCoords.prefix + '_' + label,
cidocCoords.prefix + 'P02_has_range',
AuthorPlaceholder) + closeLine
output.write(line)
if aut_role != '' and aut_role != 'artista':
role = get_role(aut_role)
line = triple(museoCoords.prefix + '_' + label,
cidocCoords.prefix + 'P14.1_in_the_role_of',
aatCoords.prefix + role) + closeLine
output.write(line)
line = triple(aatCoords.prefix + role,
nsCoords.prefix + 'type',
cidocCoords.prefix + 'E55_Type') + closeLine
output.write(line)
line = triple(aatCoords.prefix + role,
schemaCoords.prefix + 'label',
'\"' + aut_role + '\"') + closeLine
output.write(line)
else:
role = 'artista'
line = triple(museoCoords.prefix + '_' + label,
cidocCoords.prefix + 'P14.1_in_the_role_of',
aatCoords.prefix + role) + closeLine
output.write(line)
line = triple(aatCoords.prefix + role,
nsCoords.prefix + 'type',
cidocCoords.prefix + 'E55_Type') + closeLine
output.write(line)
line = triple(aatCoords.prefix + role,
schemaCoords.prefix + 'label',
'\"' + aut_role + '\"') + closeLine
output.write(line)
output.write('\n')
#
#
# Limit number of entries processed (if desired)
if (ii > max_entries):
break