# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
from urllib.request import urlopen
from bs4 import BeautifulSoup
# JSON utilities (used for the dictionaries)
import json

# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys

# Input directory for the CSV sources and output directory for the generated TTL
import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/CSV/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/RDF/'
# Custom class to store URIs + related infos for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code
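# Note: each RDFcoords instance simply pairs a namespace URI with the Turtle prefix
# used for it; the optional 'code' argument is not used by the instances created below.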
# Repositories
museoCoords = RDFcoords('<https://palazzopretorio.prato.it/it/le-opere/alcuni-capolavori/>', 'mpp:')
autCoords = RDFcoords('<https://palazzopretorio.prato.it/it/opere/autori/>', 'aut:')
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
aatCoords = RDFcoords('<http://vocab.getty.edu/aat/>', 'aat:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
schemaCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
xsdCoords = RDFcoords('<http://www.w3.org/2001/XMLSchema#>', 'xsd:')
iconCoords = RDFcoords('<http://iconclass.org/>', 'ico:')
foafCoords = RDFcoords('<http://xmlns.com/foaf/0.1/>', 'foaf:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')
# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line

def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line

def singlet(object1):
    line = ' ' + object1
    return line
# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
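# Illustrative sketch (the subject and object here are made-up values, not from the data):
# a complete Turtle statement is built by concatenating a helper with a line ending, e.g.
#   triple('aut:1', 'rdf:type', 'crm:E21_Person') + closeLine
# yields the string 'aut:1 rdf:type crm:E21_Person .\n'.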
def writeTTLHeader(output):
    output.write('@prefix ' + museoCoords.prefix + ' ' + museoCoords.uri + closeLine)
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + aatCoords.prefix + ' ' + aatCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + autCoords.prefix + ' ' + autCoords.uri + closeLine)
    output.write('@prefix ' + xsdCoords.prefix + ' ' + xsdCoords.uri + closeLine)
    output.write('@prefix ' + iconCoords.prefix + ' ' + iconCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('\n')
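# For reference, each call above emits one prefix declaration; the cidocCoords line, for
# example, produces:
#   @prefix crm: <http://www.cidoc-crm.org/cidoc-crm/> .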
filePrefix = 'PEOPLE'
fileType = ''
max_entries = 1000000000
def get_aut_url(code):
    # Return [URL, role] for the author whose AUTH code matches 'code'
    aut_file = open(import_dir + 'AR20AUT_' + fileType + '.csv', newline="")
    reader = csv.DictReader(aut_file)
    for row in reader:
        auth = int(row['AUTH'])
        cod = int(code)
        role = ''
        if row['AUTQ'] != '':
            role = row['AUTQ']
        if auth == cod:
            return [row['URL'], role]

def get_role(role):
    # Map a role label to its AAT identifier via the AAT_RUOLI lookup table
    role_file = open('/Users/federicaspinelli/Google Drive/OVI-CNR/CSV/MPP/AAT_RUOLI.csv', newline="")
    reader = csv.DictReader(role_file)
    for row in reader:
        if row['Label'] == role:
            return row['AAT']

def get_elem(mtc):
    # Map an MTC code to its AAT identifier and type via the AAT_MTC lookup table
    mtc_file = open('/Users/federicaspinelli/Google Drive/OVI-CNR/CSV/MPP/AAT_MTC.csv', newline="")
    reader = csv.DictReader(mtc_file)
    for row in reader:
        if row['MTC'] == mtc:
            return [row['AAT'], row['Type']]
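# Note: all three lookups return None when no matching row is found, so callers should be
# prepared to handle a missing URL, role or AAT code.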
with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
        export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    first = True
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1
        # placeHolders
        urlplaceHolder = '<' + row['URL'] + '>'
        e36placeHolder = '<' + row['URL'] + '_E36>'
        e36e42placeHolder = '<' + row['URL'] + '_E36_E42>'
        e36placeHolder2 = '<' + row['URL'] + '_E36_2>'
        e36e42placeHolder2 = '<' + row['URL'] + '_E36_2_E42>'
        e36placeHolder3 = '<' + row['URL'] + '_E36_3>'
        e36e42placeHolder3 = '<' + row['URL'] + '_E36_3_E42>'
        e31placeHolder = '<' + row['URL'] + '_E31>'
        e3142placeHolder = '<' + row['URL'] + '_E31_E42>'
        e31e36rplaceHolder = '<' + row['URL'] + '_E31_E36_r>'
        e31e36re42placeHolder = '<' + row['URL'] + '_E31_E36_r_E42>'
        e31e36vplaceHolder = '<' + row['URL'] + '_E31_E36_v>'
        e31e36ve42placeHolder = '<' + row['URL'] + '_E31_E36_v_E42>'
        # e31AplaceHolder = '<' + row['LINK ARCHIVE.org'] + '>'
        # e31Ae42placeHolder = '<' + row['LINK ARCHIVE.org'] + '_E42>'
        e13placeHolder = '<' + row['URL'] + '_E13_FA>'
        # e31Ae55placeHolder = '<' + row['AAT (Attribution)'] + '_E31_E55>'
        e12placeHolder = '<' + row['URL'] + '_E12>'
        e12FplaceHolder = '<' + row['URL'] + '_E12F>'

        if row['URL'] != '':
            e21AUTplaceHolder = '<' + row['URL'] + '>'
            # Link the local author URI to external authority records, when available
            if row['ULAN'] != '':
                e21ULANplaceHolder = '<https://vocab.getty.edu/page/ulan/' + row['ULAN'] + '>'
                line = triple(e21AUTplaceHolder, owlCoords.prefix + 'sameAs', e21ULANplaceHolder) + closeLine
                output.write(line)
            if row['VIAF'] != '':
                e21VIAFplaceHolder = '<https://viaf.org/viaf/' + row['VIAF'] + '>'
                line = triple(e21AUTplaceHolder, owlCoords.prefix + 'sameAs', e21VIAFplaceHolder) + closeLine
                output.write(line)

        output.write('\n')
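        # Illustrative output for a record carrying both identifiers (the author URL, ULAN id
        # and VIAF id below are made-up values, not taken from the data):
        #   <https://palazzopretorio.prato.it/it/opere/autori/...> owl:sameAs <https://vocab.getty.edu/page/ulan/500012345> .
        #   <https://palazzopretorio.prato.it/it/opere/autori/...> owl:sameAs <https://viaf.org/viaf/12345678> .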
        #
        #
        # Limit number of entries processed (if desired)
        if ii > max_entries:
            break