glam/scripts/backfill_remaining_enrichment_history.py
kempersc e5a532a8bc Add comprehensive tests for NLP institution extraction and RDF partnership integration
- Introduced `test_nlp_extractor.py` with unit tests for the InstitutionExtractor, covering various extraction patterns (ISIL, Wikidata, VIAF, city names) and ensuring proper classification of institutions (museum, library, archive).
- Added tests for extracted entities and result handling to validate the extraction process.
- Created `test_partnership_rdf_integration.py` to validate the end-to-end process of extracting partnerships from a conversation and exporting them to RDF format.
- Implemented tests for temporal properties in partnerships and ensured compliance with W3C Organization Ontology patterns.
- Verified that extracted partnerships are correctly linked with PROV-O provenance metadata.
2025-11-19 23:20:47 +01:00

290 lines
11 KiB
Python
Executable file
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#!/usr/bin/env python3
"""
Backfill enrichment_history for remaining institutions with Wikidata IDs.
Handles:
- Brazil (7): From Brazilian GLAM conversation
- Belgium (7): Manual enrichments (EU institutions)
- Great Britain (4): From Libyan research conversation
- United States (7): Manual enrichments (library aggregators)
This script:
1. Identifies institutions with Wikidata IDs but no enrichment_history
2. Creates appropriate enrichment_history based on source type
3. Preserves original provenance metadata
4. Backs up files before modification
"""
import yaml
from pathlib import Path
from typing import List, Dict, Any
import shutil
# Country configurations
#
# Per-country settings driving the backfill:
#   dir                - directory holding the country's institution YAML files
#   conversation_id    - source conversation ID, or None for manual enrichments
#   conversation_date  - ISO timestamp of the source conversation (if any)
#   conversation_name  - human-readable conversation label (if any)
#   enrichment_context - notes template; {conv_id}/{conv_date} placeholders are
#                        filled in only when conversation_id is truthy
# NOTE(review): paths are hard-coded to one user's machine — confirm before
# running elsewhere.
COUNTRY_CONFIGS = {
    'brazil': {
        'dir': Path("/Users/kempersc/apps/glam/data/instances/brazil"),
        'conversation_id': '0102c00a-4c0a-4488-bdca-5dd9fb94c9c5',
        'conversation_date': '2025-09-22T14:40:15Z',
        'conversation_name': 'Brazilian_GLAM_collection_inventories',
        'enrichment_context': """Wikidata identifier obtained during comprehensive Brazilian GLAM research conversation
(ID: {conv_id}, date: {conv_date}). Research analyzed Brazil's extensive cultural heritage network
including 695+ library services, 500,000+ archival records, and 72,000+ museum objects. Verification
performed through cross-referencing with institutional websites, IBRAM (Brazilian Institute of Museums),
national library and archive systems, and UNESCO World Heritage site documentation."""
    },
    'belgium': {
        'dir': Path("/Users/kempersc/apps/glam/data/instances/belgium"),
        'conversation_id': None,  # Manual enrichments
        'enrichment_context': """Wikidata identifier obtained through manual enrichment process focusing on European Union
institutional heritage collections. Research involved cross-referencing European institution websites,
official EU documentation, library catalogs, and Wikidata entities. Special attention given to EU
institutional archives, parliamentary libraries, and heritage organizations based in Belgium."""
    },
    'great_britain': {
        'dir': Path("/Users/kempersc/apps/glam/data/instances/great_britain"),
        'conversation_id': 'd06ded03-ba79-4b79-b068-406c2da01f8c',
        'conversation_date': '2025-09-22T14:49:44Z',
        'conversation_name': 'Libyan_cultural_heritage_resources',
        'enrichment_context': """Wikidata identifier obtained during Libyan cultural heritage research conversation
(ID: {conv_id}, date: {conv_date}). Research identified UK-based institutions conducting research
on Libyan archaeology and North African studies, including academic research centers, digital archives,
and heritage gazetteers. Cross-referencing performed against institutional websites, academic databases,
and Wikidata entities."""
    },
    'united_states': {
        'dir': Path("/Users/kempersc/apps/glam/data/instances/united_states"),
        'conversation_id': None,  # Mixed sources (marked as "mixed")
        'enrichment_context': """Wikidata identifier obtained through manual enrichment process focusing on major US-based
library aggregation platforms and digital heritage infrastructure. Research involved cross-referencing
institutional websites, OCLC documentation, HathiTrust records, Internet Archive metadata, and Wikidata
entities. Special attention given to national-scale digital library platforms and cultural heritage
aggregators."""
    }
}
def needs_backfill(institution: Dict[str, Any], conversation_id: str | None) -> bool:
"""Check if institution needs enrichment_history backfill."""
# Must have Wikidata identifier
has_wikidata = False
for identifier in institution.get('identifiers', []):
if identifier.get('identifier_scheme') == 'Wikidata':
has_wikidata = True
break
if not has_wikidata:
return False
# Must lack enrichment_history
provenance = institution.get('provenance', {})
has_enrichment = 'enrichment_history' in provenance
if has_enrichment:
return False
# Check conversation_id if specified
if conversation_id:
conv_id = provenance.get('conversation_id')
if conv_id != conversation_id:
return False
return True
def get_wikidata_id(institution: Dict[str, Any]) -> str:
    """Return the institution's Wikidata Q-number, or '' if none is recorded."""
    wikidata_entries = (
        entry for entry in institution.get('identifiers', [])
        if entry.get('identifier_scheme') == 'Wikidata'
    )
    first_match = next(wikidata_entries, None)
    return first_match.get('identifier_value', '') if first_match else ''
def create_enrichment_history(
    institution: Dict[str, Any],
    config: Dict[str, Any]
) -> List[Dict[str, Any]]:
    """Build a single-entry enrichment_history list for an institution.

    The entry records when the Wikidata identifier was obtained
    (enrichment_date), how (enrichment_method), from which URLs
    (enrichment_source), and the narrative context (enrichment_notes)
    taken from the country config.
    """
    # Locate the first Wikidata identifier on the record (if any).
    qid = ''
    for entry in institution.get('identifiers', []):
        if entry.get('identifier_scheme') == 'Wikidata':
            qid = entry.get('identifier_value', '')
            break

    # Timestamp: the record's own extraction_date, else the conversation
    # date, else a fixed default.
    timestamp = institution.get('provenance', {}).get('extraction_date', '')
    if not timestamp:
        timestamp = config.get('conversation_date', '2025-11-06T00:00:00Z')

    # Sources: the Wikidata page first, then deduplicated platform URLs.
    sources = [f"https://www.wikidata.org/wiki/{qid}"]
    for platform in institution.get('digital_platforms', []):
        url = platform.get('platform_url', '')
        if url and url not in sources:
            sources.append(url)

    # Fill in the {conv_id}/{conv_date} placeholders only for
    # conversation-sourced configs.
    notes = config['enrichment_context']
    conv_id = config.get('conversation_id')
    if conv_id:
        notes = notes.format(
            conv_id=conv_id,
            conv_date=config.get('conversation_date', '').split('T')[0]
        )

    method = (
        'Conversation-based research with Wikidata verification and institutional cross-referencing'
        if conv_id else
        'Manual enrichment with Wikidata verification and institutional documentation review'
    )

    return [{
        'enrichment_date': timestamp,
        'enrichment_method': method,
        'enrichment_source': sources,
        'enrichment_notes': notes
    }]
def backfill_country(country_name: str, config: Dict[str, Any]) -> Dict[str, int]:
    """Process all YAML files for one country and backfill enrichment_history.

    Every institution that qualifies (see needs_backfill) gets an
    enrichment_history entry attached to its provenance. Each modified
    file is backed up once (first run only) before being rewritten.

    Args:
        country_name: Key from COUNTRY_CONFIGS; used only for logging.
        config: Country configuration (directory, conversation metadata,
            enrichment context template).

    Returns:
        Stats dict with 'files', 'total', 'backfilled', 'skipped' counts.
    """
    stats = {
        'files': 0,
        'total': 0,
        'backfilled': 0,
        'skipped': 0
    }
    print(f"\n{'='*70}")
    print(f"Processing: {country_name.upper()}")
    print(f"{'='*70}")
    print(f"Conversation ID: {config.get('conversation_id', 'Manual enrichment')}")
    print(f"Directory: {config['dir']}")
    if not config['dir'].exists():
        print("❌ Directory not found!")
        return stats
    # Find all YAML files (exclude backups from earlier runs)
    yaml_files = [
        f for f in config['dir'].glob("*.yaml")
        if not any(suffix in f.name for suffix in ['.backup', '.pre_', '.bak'])
    ]
    if not yaml_files:
        print("❌ No YAML files found!")
        return stats
    print(f"Found {len(yaml_files)} file(s)\n")
    for yaml_file in yaml_files:
        print(f"File: {yaml_file.name}")
        stats['files'] += 1
        # Load institutions; a malformed file must not abort the whole
        # multi-country run, so parse errors are reported and skipped.
        try:
            with open(yaml_file, 'r', encoding='utf-8') as f:
                institutions = yaml.safe_load(f)
        except yaml.YAMLError as err:
            print(f" ⚠️ YAML parse error, skipping file: {err}")
            continue
        if not institutions or not isinstance(institutions, list):
            print(f" ⚠️ Empty or invalid file")
            continue
        stats['total'] += len(institutions)
        modified = False
        # Process each institution
        for inst in institutions:
            if not isinstance(inst, dict):
                continue
            if needs_backfill(inst, config.get('conversation_id')):
                # Create and attach enrichment_history to provenance.
                enrichment_history = create_enrichment_history(inst, config)
                if 'provenance' not in inst:
                    inst['provenance'] = {}
                inst['provenance']['enrichment_history'] = enrichment_history
                stats['backfilled'] += 1
                modified = True
                # Log which record was touched.
                name = inst.get('name', 'Unknown')
                wikidata_id = get_wikidata_id(inst)
                print(f"{name} (Wikidata: {wikidata_id})")
            else:
                stats['skipped'] += 1
        # Save if modified
        if modified:
            # Back up the original once; subsequent runs keep the first backup.
            backup_path = yaml_file.with_suffix('.yaml.pre_enrichment_backfill')
            if not backup_path.exists():
                shutil.copy2(yaml_file, backup_path)
                print(f" 💾 Backup: {backup_path.name}")
            # Write updated file
            with open(yaml_file, 'w', encoding='utf-8') as f:
                yaml.dump(
                    institutions,
                    f,
                    default_flow_style=False,
                    allow_unicode=True,
                    sort_keys=False,
                    width=120
                )
            print(f" ✨ Updated: {yaml_file.name}")
        else:
            print(f" No changes needed")
    return stats
def main():
    """Run the enrichment-history backfill for every configured country
    and print a combined summary at the end."""
    banner = "=" * 70
    print(banner)
    print("Remaining Countries Enrichment History Backfill")
    print(banner)
    print("\nCountries: Brazil (7), Belgium (7), Great Britain (4), United States (7)")
    print("Total expected backfills: 25 institutions\n")

    # Accumulate per-country stats into one summary.
    totals = {
        'countries': 0,
        'files': 0,
        'total_institutions': 0,
        'backfilled': 0,
        'skipped': 0
    }
    for country, cfg in COUNTRY_CONFIGS.items():
        country_stats = backfill_country(country, cfg)
        totals['countries'] += 1
        totals['files'] += country_stats['files']
        totals['total_institutions'] += country_stats['total']
        totals['backfilled'] += country_stats['backfilled']
        totals['skipped'] += country_stats['skipped']

    # Summary
    print("\n" + banner)
    print("Backfill Summary")
    print(banner)
    print(f"Countries processed: {totals['countries']}")
    print(f"Files processed: {totals['files']}")
    print(f"Total institutions: {totals['total_institutions']}")
    print(f"✅ Backfilled: {totals['backfilled']}")
    print(f"⏭️ Skipped: {totals['skipped']}")
    print()
    if totals['backfilled'] > 0:
        print("✨ Enrichment history successfully backfilled!")
        print("\nNext step: Run comprehensive validation across ALL countries")
    else:
        print(" No institutions required backfilling")
# Script entry point: run the backfill only when executed directly.
if __name__ == '__main__':
    main()