# Parser to convert the ASPO "Ospedale" onomastics CSV file into TTL format

# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
# Json for the dictionary
import json
import re

# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys

import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/ASPO/CSV/ospedale/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/ASPO/RDF/ospedale/'
# Custom class to store URIs + related info for the ontologies/repositories

class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code


# Repositories
aspoCoords = RDFcoords('<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/>', 'aspo:')
foafCoords = RDFcoords('<http://xmlns.com/foaf/0.1/>', 'foaf:')
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
schemaCoords = RDFcoords('<http://schema.org/>', 'schema:')
personCoords = RDFcoords('<http://www.w3.org/ns/person#>', 'person:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
rdfsCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')
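# For illustration: with the prefixes above, a prefixed name such as
# aspo:IT-ASPO-ST00100-0001 (a made-up record id, not taken from the data) expands to
# <http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/IT-ASPO-ST00100-0001>
# once the corresponding @prefix declaration has been written to the TTL file.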
# Basic functions for triples / shortened triples in TTL format

def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line


def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line


def singlet(object1):
    line = ' ' + object1
    return line


# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
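# For illustration (hypothetical values): triple('aspo:0001', 'rdf:type', 'crm:E21_Person')
# followed by closeLine yields the TTL statement
#   aspo:0001 rdf:type crm:E21_Person .
# doublet/singlet build the shortened "predicate object" / "object" forms, presumably for
# use with continueLine1 (' ;') and continueLine2 (' ,') when a subject or predicate repeats;
# they are not used in this script.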
def writeTTLHeader(output):
    output.write('@prefix ' + aspoCoords.prefix + ' ' + aspoCoords.uri + closeLine)
    output.write('@prefix ' + foafCoords.prefix + ' ' + foafCoords.uri + closeLine)
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + personCoords.prefix + ' ' + personCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + rdfsCoords.prefix + ' ' + rdfsCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('\n')
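# For illustration, the header produces one line per declared namespace, e.g.
#   @prefix aspo: <http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/> .
#   @prefix crm: <http://www.cidoc-crm.org/cidoc-crm/> .
# followed by a blank line before the data triples.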
filePrefix = 'OSPEDALE - onomastica '
fileType = '- persone singole'
max_entries = 10000000000000

with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
        export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    first = True
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1
        if row['entityType'] == 'person':
            id_aspo = row['recordId']

            # Placeholders
            aspoPlaceHolder = aspoCoords.prefix + id_aspo

            line = triple(aspoPlaceHolder,
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E21_Person') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          nsCoords.prefix + 'type',
                          personCoords.prefix + 'Person') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          nsCoords.prefix + 'type',
                          foafCoords.prefix + 'person') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          cidocCoords.prefix + 'P1_is_identified_by',
                          aspoPlaceHolder + "_E42") + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder + "_E42",
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E42_Identifier') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder + "_E42",
                          rdfsCoords.prefix + 'label',
                          '\"' + id_aspo + '\"') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          foafCoords.prefix + 'name',
                          '\"' + row['nameEntry@normal'] + '\"') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          rdfsCoords.prefix + 'label',
                          '\"' + row['nameEntry@normal'] + '\"') + closeLine
            output.write(line)
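            # For illustration (assuming a record id like 'IT-ASPO-ST00100-0001' and a
            # normalised name 'Giovanni di Paolo', both hypothetical), the block above emits
            #   aspo:IT-ASPO-ST00100-0001 rdf:type crm:E21_Person .
            #   aspo:IT-ASPO-ST00100-0001 crm:P1_is_identified_by aspo:IT-ASPO-ST00100-0001_E42 .
            #   aspo:IT-ASPO-ST00100-0001 foaf:name "Giovanni di Paolo" .
            # i.e. the person, an E42 Identifier node for its record id, and its display labels.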
            if row['nome proprio'] != '':
                # Collapse newlines and doubled spaces
                txt = row['nome proprio']
                x = re.sub(r" \n", "", txt)
                y = re.sub(r"\s\s", "", x)
                name = re.sub(r"\n", "", y)
                line = triple(aspoPlaceHolder,
                              foafCoords.prefix + 'givenName',
                              '\"' + name + '\"') + closeLine
                output.write(line)

            if row['nome di famiglia'] != '':
                # Collapse newlines and doubled spaces
                txt = row['nome di famiglia']
                x = re.sub(r"\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              foafCoords.prefix + 'familyName',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['Alias'] != '' and row['Alias'] != ' ':
                # Collapse newlines and doubled spaces
                txt = row['Alias']
                x = re.sub(r"\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              schemaCoords.prefix + 'alternateName',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['genere'] != '':
                # Collapse newlines and doubled spaces
                txt = row['genere']
                x = re.sub(r"\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              foafCoords.prefix + 'gender',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['patronimico/matronimico'] != '':
                # Collapse newlines and doubled spaces
                txt = row['patronimico/matronimico']
                x = re.sub(r"\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              personCoords.prefix + 'patronymicName',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['occupation'] != '' and row['occupation'] != ' ':
                occupazioni = []
                pipe = "|"
                if pipe in row['occupation']:
                    # Multiple occupations are separated by '|'
                    occupazioni = row['occupation'].split('|')
                    for occupazione in occupazioni:
                        # Collapse newlines and doubled spaces, keep letters only for the URI
                        txt = occupazione
                        x = re.sub(r"\n", " ", txt)
                        y = re.sub(r"\s\s", "", x)
                        occ = re.sub(r'[^A-Za-z]', '', y)
                        occupationPlaceHolder = '<http://www.archiviodistato.prato.it/' + occ.replace(" ", "_") + '>'
                        line = triple(aspoPlaceHolder,
                                      schemaCoords.prefix + 'hasOccupation',
                                      occupationPlaceHolder) + closeLine
                        output.write(line)
                        line = triple(occupationPlaceHolder,
                                      nsCoords.prefix + 'type',
                                      schemaCoords.prefix + 'Occupation') + closeLine
                        output.write(line)
                        line = triple(occupationPlaceHolder,
                                      rdfsCoords.prefix + 'label',
                                      '\"' + y + '\"') + closeLine
                        output.write(line)
                else:
                    # Single occupation
                    txt = row['occupation']
                    x = re.sub(r"\n", " ", txt)
                    y = re.sub(r"\s\s", "", x)
                    occ = re.sub(r'[^A-Za-z]', '', y)
                    occupationPlaceHolder = '<http://www.archiviodistato.prato.it/' + occ.replace(" ", "_") + '>'
                    line = triple(aspoPlaceHolder,
                                  schemaCoords.prefix + 'hasOccupation',
                                  occupationPlaceHolder) + closeLine
                    output.write(line)
                    line = triple(occupationPlaceHolder,
                                  nsCoords.prefix + 'type',
                                  schemaCoords.prefix + 'Occupation') + closeLine
                    output.write(line)
                    line = triple(occupationPlaceHolder,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + y + '\"') + closeLine
                    output.write(line)
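            # For illustration (assuming row['occupation'] = 'speziale|mercante', a hypothetical
            # value), the loop above mints one URI per value, e.g.
            # <http://www.archiviodistato.prato.it/speziale>, types it as schema:Occupation,
            # gives it an rdfs:label, and links the person to it via schema:hasOccupation.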

            if (row['avo 1'] != ''):
                id = row['avo 1']
                E13placeHolder = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['avo 1'].replace(' ', '_') + '_AVO1_' + row['recordId'] + ">"
                line = triple(E13placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E13_Attribute_Assignment') + closeLine
                output.write(line)
                line = triple(E13placeHolder, cidocCoords.prefix + 'P141_assigned', aspoPlaceHolder) + closeLine
                output.write(line)
                line = triple(E13placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Relazione: ' + row['avo 1'] + ' avo di secondo grado di ' + row['recordId'] + '\"') + closeLine
                output.write(line)
                if re.match(r'IT-ASPO', id):
                    # The ancestor already has an ASPO record id: reuse its URI
                    relazioneid = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['avo 1'] + ">"
                    line = triple(relazioneid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                else:
                    # No ASPO record id: mint a URI from the name and describe the person
                    relazionenoid = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['avo 1'].replace(' ', '_').lower() + ">"
                    line = triple(relazionenoid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + row['avo 1'] + '\"') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  cidocCoords.prefix + 'E21_Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  personCoords.prefix + 'Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  foafCoords.prefix + 'person') + closeLine
                    output.write(line)
                E55placeHolder = '<http://www.archiviodistato.prato.it/avo_secondo_grado>'
                line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                output.write(line)
                line = triple(E55placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Avo di secondo grado\"') + closeLine
                output.write(line)
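            # For illustration: each ancestor relation is reified as a crm:E13_Attribute_Assignment
            # node. The E13 node is linked to the current person via crm:P141_assigned, the
            # ancestor's URI is linked to the E13 node via crm:P141_assigned, and the E13 node
            # points via crm:P42_assigned at a type node such as
            # <http://www.archiviodistato.prato.it/avo_secondo_grado>. The same pattern is used
            # below for 'avo 2' and 'recordID relazione'.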
            if (row['avo 2'] != ''):
                id = row['avo 2']
                E13placeHolder = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['avo 2'].replace(' ', '_') + '_AVO2_' + row['recordId'] + ">"
                line = triple(E13placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E13_Attribute_Assignment') + closeLine
                output.write(line)
                line = triple(E13placeHolder, cidocCoords.prefix + 'P141_assigned', aspoPlaceHolder) + closeLine
                output.write(line)
                line = triple(E13placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Relazione: ' + row['avo 2'] + ' avo di terzo grado di ' + row['recordId'] + '\"') + closeLine
                output.write(line)
                if re.match(r'IT-ASPO', id):
                    relazioneid = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['avo 2'] + ">"
                    line = triple(relazioneid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                else:
                    relazionenoid = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['avo 2'].replace(' ', '_').lower() + ">"
                    line = triple(relazionenoid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + row['avo 2'] + '\"') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  cidocCoords.prefix + 'E21_Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  personCoords.prefix + 'Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  foafCoords.prefix + 'person') + closeLine
                    output.write(line)
                E55placeHolder = '<http://www.archiviodistato.prato.it/avo_terzo_grado>'
                line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                output.write(line)
                line = triple(E55placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Avo di terzo grado\"') + closeLine
                output.write(line)
            if row['Qualifica'] != '':
                qualifiche = []
                pipe = "|"
                if pipe in row['Qualifica']:
                    # Multiple honorifics are separated by '|'
                    qualifiche = row['Qualifica'].split('|')
                    for qualifica in qualifiche:
                        # Collapse newlines and doubled spaces
                        txt = qualifica
                        x = re.sub(r"\n", " ", txt)
                        y = re.sub(r"\s\s", " ", x)
                        line = triple(aspoPlaceHolder, schemaCoords.prefix + 'honorificPrefix', '\"' + str(y) + '\"') + closeLine
                        output.write(line)
                else:
                    # Collapse newlines and doubled spaces
                    txt = row['Qualifica']
                    x = re.sub(r"\n", " ", txt)
                    y = re.sub(r"\s\s", " ", x)
                    line = triple(aspoPlaceHolder, schemaCoords.prefix + 'honorificPrefix', '\"' + y + '\"') + closeLine
                    output.write(line)
            if row['biogHist p'] != '':
                # Collapse newlines and doubled spaces, then drop double quotes so the literal stays valid
                txt = row['biogHist p']
                x = re.sub(r"\n", " ", txt)
                y = re.sub(r"\s\s", " ", x)
                note = re.sub("\"", "", y)
                line = triple(aspoPlaceHolder,
                              cidocCoords.prefix + 'P3_has_note',
                              '\"' + note + '\"') + closeLine
                output.write(line)

            if row['Variante'] != '':
                varianti = []
                pipe = "|"
                if pipe in row['Variante']:
                    varianti = row['Variante'].split('|')
                    for variante in varianti:
                        line = triple(aspoPlaceHolder,
                                      owlCoords.prefix + 'sameAs',
                                      aspoCoords.prefix + str(variante)) + closeLine
                        output.write(line)
                else:
                    line = triple(aspoPlaceHolder,
                                  owlCoords.prefix + 'sameAs',
                                  aspoCoords.prefix + row['Variante']) + closeLine
                    output.write(line)
            if row['provenienza'] != '':
                # Mint a place URI from 'provenienza' (removing the substring 'da' and all spaces)
                e53placeHolder = "<http://www.archiviodistato.prato.it/" + row['provenienza'].replace('da', '').replace(' ', '') + ">"
                line = triple(aspoPlaceHolder,
                              cidocCoords.prefix + 'P74_has_current_or_former_residence',
                              e53placeHolder) + closeLine
                output.write(line)
                line = triple(e53placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E53_Place') + closeLine
                output.write(line)
                line = triple(e53placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"' + row['provenienza'] + '\"') + closeLine
                output.write(line)
                line = triple(e53placeHolder,
                              cidocCoords.prefix + 'P2_has_type',
                              '\"Provenienza\"') + closeLine
                output.write(line)
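            # For illustration (assuming row['provenienza'] = 'da Prato', a hypothetical value),
            # the block above links the person to <http://www.archiviodistato.prato.it/Prato>,
            # types it as crm:E53_Place, keeps the original string as its rdfs:label and tags it
            # with the literal "Provenienza" via crm:P2_has_type.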
            if (row['recordID relazione'] != ''):
                id = row['recordID relazione']
                E13placeHolder = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['recordID relazione'].replace(' ', '_') + '_R_' + row['recordId'] + ">"
                line = triple(E13placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E13_Attribute_Assignment') + closeLine
                output.write(line)
                line = triple(E13placeHolder, cidocCoords.prefix + 'P141_assigned', aspoPlaceHolder) + closeLine
                output.write(line)
                line = triple(E13placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Relazione: ' + row['recordID relazione'] + row['nome relazione'] + ' di ' + row['recordId'] + '\"') + closeLine
                output.write(line)
                if re.match(r'IT-ASPO', id):
                    relazioneid = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['recordID relazione'] + ">"
                    line = triple(relazioneid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                else:
                    relazionenoid = '<http://www.archiviodistato.prato.it/accedi-e-consulta/aspoMV001/scheda/' + row['recordID relazione'].replace(' ', '_').lower() + ">"
                    line = triple(relazionenoid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + row['recordID relazione'] + '\"') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  cidocCoords.prefix + 'E21_Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  personCoords.prefix + 'Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  foafCoords.prefix + 'person') + closeLine
                    output.write(line)
                if (row['nome relazione'] != ''):
                    relazioni = []
                    pipe = "|"
                    if pipe in row['nome relazione']:
                        # Multiple relation names are separated by '|'
                        relazioni = row['nome relazione'].split('|')
                        for relazione in relazioni:
                            # Collapse newlines and doubled spaces, keep letters only for the URI
                            txt = relazione
                            x = re.sub(r"\n", " ", txt)
                            y = re.sub(r"\s\s", "", x)
                            rel = re.sub(r'[^A-Za-z]', '', y)
                            cleanlabel = rel.strip()
                            E55placeHolder = '<http://www.archiviodistato.prato.it/relation_' + cleanlabel.replace(" ", "") + '>'
                            line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                            output.write(line)
                            line = triple(E55placeHolder,
                                          rdfsCoords.prefix + 'label',
                                          '\"' + cleanlabel + '\"') + closeLine
                            output.write(line)
                    else:
                        # Single relation name: clean the label before minting the URI
                        cleanlabel = row['nome relazione'].strip()
                        E55placeHolder = '<http://www.archiviodistato.prato.it/relation_' + cleanlabel.replace(' ', '') + '>'
                        line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                        output.write(line)
                        line = triple(E55placeHolder,
                                      rdfsCoords.prefix + 'label',
                                      '\"' + cleanlabel + '\"') + closeLine
                        output.write(line)

            output.write('\n')
        #
        #
        # Limit the number of entries processed (if desired)
        if (ii > max_entries):
            break