# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
# Json for the dictionary
import json

# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys

import_dir = '/Users/alessiaspadi/Documents/RESTORE/temp_MPP/tabelle/Datini/mod/'
export_dir = '/Users/alessiaspadi/Documents/RESTORE/temp_MPP/tabelle/Carica/AUT/'
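
# Columns read from the CSV below: URL, AUTH, AUTN, AUTA, AUTU, AUTL, AUTD,
# AUTX, AUTT. As used in the loop, URL builds the author URIs, AUTH is written
# as an E42 Identifier, AUTN/AUTA form the person label, AUTU records group
# membership, AUTL/AUTD the birth place and date, AUTX/AUTT the death place
# and date (reading of the field semantics inferred from the code, not from
# the source documentation).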

# Custom class to store URIs + related infos for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code


# Repositories
museoCoords = RDFcoords('<http://palazzopretorio.comune.prato.it/it/le-opere/alcuni-capolavori/>', 'mpp:')
autCoords = RDFcoords('<http://palazzopretorio.comune.prato.it/it/opere/autori/>', 'aut:')
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
aatCoords = RDFcoords('<http://vocab.getty.edu/aat/>', 'aat:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
schemaCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
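
# Example of how these coordinates are combined further down: autCoords.prefix
# + '123' yields the prefixed name 'aut:123', which the @prefix declarations
# written by writeTTLHeader() expand to the full author URI ('123' is an
# illustrative value only).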

# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line


def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line


def singlet(object1):
    line = ' ' + object1
    return line


# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
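
# In Turtle, ' ;' continues a list of predicates for the same subject, ' ,'
# continues a list of objects for the same predicate, and ' .' closes the
# statement. This script always emits full subject-predicate-object lines, so
# only closeLine is actually used in the loop below.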

def writeTTLHeader(output):
    output.write('@prefix ' + museoCoords.prefix + ' ' + museoCoords.uri + closeLine)
    output.write('@prefix ' + autCoords.prefix + ' ' + autCoords.uri + closeLine)
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + aatCoords.prefix + ' ' + aatCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('\n')


filePrefix = 'AR20AUT_'
fileType = 'Datini'
max_entries = 1000000000
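
# With the settings above, the script reads
#   <import_dir>/AR20AUT_Datini.csv
# and writes the corresponding Turtle file
#   <export_dir>/AR20AUT_Datini.ttl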

with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
        export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)

    first = True
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1

        url = row['URL']

        # Placeholders (prefixed names) for the CIDOC CRM entities tied to this author
        e21placeHolder = autCoords.prefix + url
        e62placeHolder = autCoords.prefix + url + '_E62'
        e41placeHolder = autCoords.prefix + url + '_E41'
        e42placeHolder = autCoords.prefix + row['AUTH']
        e67placeHolder = autCoords.prefix + url + '_E67'
        e69placeHolder = autCoords.prefix + url + '_E69'

        # E21 Person: rdf:type, label and source note (P3 - E62)
        line = triple(e21placeHolder, nsCoords.prefix + 'type',
                      cidocCoords.prefix + 'E21_Person') + closeLine
        output.write(line)
        line = triple(e21placeHolder, schemaCoords.prefix + 'label',
                      '\"' + row['AUTN'] + ', ' + row['AUTA'] + '\"') + closeLine
        output.write(line)
        line = triple(e21placeHolder, cidocCoords.prefix + 'P3_has_note',
                      e62placeHolder) + closeLine
        output.write(line)
        line = triple(e62placeHolder, nsCoords.prefix + 'type',
                      cidocCoords.prefix + 'E62_String') + closeLine
        output.write(line)
        line = triple(e62placeHolder, schemaCoords.prefix + 'label',
                      '\"Fonte: Museo di Palazzo Pretorio - Collezione Ospedale\"') + closeLine
        output.write(line)

        # E21 - P1 - E42
        line = triple(e21placeHolder, cidocCoords.prefix + 'P1_is_identified_by',
                      e42placeHolder) + closeLine
        output.write(line)
        line = triple(e42placeHolder, nsCoords.prefix + 'type',
                      cidocCoords.prefix + 'E42_Identifier') + closeLine
        output.write(line)
        line = triple(e42placeHolder, schemaCoords.prefix + 'label',
                      '\"' + row['AUTH'] + '\"') + closeLine
        output.write(line)

        # E21 - P1 - E41 (appellation block currently disabled)
        '''line = triple(e21placeHolder, cidocCoords.prefix + 'P1_is_identified_by',
                      e41placeHolder) + closeLine
        output.write(line)
        line = triple(e41placeHolder, nsCoords.prefix + 'type',
                      cidocCoords.prefix + 'E41_Appellation') + closeLine
        output.write(line)
        line = triple(e41placeHolder, schemaCoords.prefix + 'label',
                      '\"' + row['AUTN'] + '\"') + closeLine
        output.write(line)'''

        # E21 - P107i - E74
        if row['AUTU'] != '':
            # AUTU can list several groups separated by '/'
            group = []
            if '/' in row['AUTU']:
                group = row['AUTU'].split('/')
            else:
                group.append(row['AUTU'])
            for gr in group:
                gg = gr.replace(' ', '')
                e74placeHolder = museoCoords.prefix + gg
                line = triple(e21placeHolder,
                              cidocCoords.prefix + 'P107i_is_current_or_former_member_of',
                              e74placeHolder) + closeLine
                output.write(line)
                line = triple(e74placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E74_Group') + closeLine
                output.write(line)
                line = triple(e74placeHolder,
                              schemaCoords.prefix + 'label',
                              '\"' + row['AUTU'] + '\"') + closeLine
                output.write(line)

        # E21 - P98i - E67
        line = triple(e21placeHolder,
                      cidocCoords.prefix + 'P98i_was_born',
                      e67placeHolder) + closeLine
        output.write(line)
        line = triple(e67placeHolder,
                      nsCoords.prefix + 'type',
                      cidocCoords.prefix + 'E67_Birth') + closeLine
        output.write(line)
        line = triple(e67placeHolder,
                      schemaCoords.prefix + 'label',
                      '\"Nascita di ' + row['AUTN'] + '\"') + closeLine
        output.write(line)

        # E21 - P100i - E69
        line = triple(e21placeHolder,
                      cidocCoords.prefix + 'P100i_died_in',
                      e69placeHolder) + closeLine
        output.write(line)
        line = triple(e69placeHolder,
                      nsCoords.prefix + 'type',
                      cidocCoords.prefix + 'E69_Death') + closeLine
        output.write(line)
        line = triple(e69placeHolder,
                      schemaCoords.prefix + 'label',
                      '\"Morte di ' + row['AUTN'] + '\"') + closeLine
        output.write(line)

        # E67 - P7 - E53
        if row['AUTL'] != '':
            line = triple(e67placeHolder,
                          cidocCoords.prefix + 'P7_took_place_at',
                          museoCoords.prefix + row['AUTL']) + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + row['AUTL'],
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E53_Place') + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + row['AUTL'],
                          schemaCoords.prefix + 'label',
                          '\"' + row['AUTL'] + '\"') + closeLine
            output.write(line)

        # E67 - P4 - E52
        if row['AUTD'] != '':
            # Strip spaces, slashes and dots so the date can be used in a prefixed name
            tt = row['AUTD'].replace(' ', '')
            tim = tt.replace('/', '')
            time = tim.replace('.', '')
            line = triple(e67placeHolder,
                          cidocCoords.prefix + 'P4_has_time-span',
                          museoCoords.prefix + time) + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + time,
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E52_Time-Span') + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + time,
                          schemaCoords.prefix + 'label',
                          '\"' + row['AUTD'] + '\"') + closeLine
            output.write(line)

        # E69 - P7 - E53
        if row['AUTX'] != '':
            line = triple(e69placeHolder,
                          cidocCoords.prefix + 'P7_took_place_at',
                          museoCoords.prefix + row['AUTX']) + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + row['AUTX'],
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E53_Place') + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + row['AUTX'],
                          schemaCoords.prefix + 'label',
                          '\"' + row['AUTX'] + '\"') + closeLine
            output.write(line)

        # E69 - P4 - E52
        if row['AUTT'] != '':
            tt = row['AUTT'].replace(' ', '')
            tim = tt.replace('/', '')
            time = tim.replace('.', '')
            line = triple(e69placeHolder,
                          cidocCoords.prefix + 'P4_has_time-span',
                          museoCoords.prefix + time) + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + time,
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E52_Time-Span') + closeLine
            output.write(line)
            line = triple(museoCoords.prefix + time,
                          schemaCoords.prefix + 'label',
                          '\"' + row['AUTT'] + '\"') + closeLine
            output.write(line)

        output.write('\n')

        # Limit number of entries processed (if desired)
        if ii > max_entries:
            break
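
# For reference, a single CSV row produces a block of Turtle statements of the
# following shape (hypothetical row with URL='rossi', AUTH='A001', AUTN='Rossi',
# AUTA='notizie 1400'; all values are illustrative only):
#
#   aut:rossi rdf:type crm:E21_Person .
#   aut:rossi rdfs:label "Rossi, notizie 1400" .
#   aut:rossi crm:P3_has_note aut:rossi_E62 .
#   aut:rossi_E62 rdf:type crm:E62_String .
#   aut:rossi_E62 rdfs:label "Fonte: Museo di Palazzo Pretorio - Collezione Ospedale" .
#   aut:rossi crm:P1_is_identified_by aut:A001 .
#   ...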