"""
Term Cleaner Module
===================
Stop-word cleaning, filtering and normalization
"""

import re
import sys
from typing import List, Dict, Tuple, Set, Optional
from collections import Counter

# Config import
sys.path.insert(0, '..')
try:
    import config
except ImportError:
    from .. import config


class TermCleaner:
    """
    Term cleaning and filtering class.

    Applies stop-word, frequency and confidence-score filters to
    (source, target) term pairs.
    """

    def __init__(self):
        # Language-specific stop-word sets from the project config.
        self.en_stopwords = config.ENGLISH_STOP_WORDS
        self.tr_stopwords = config.TURKISH_STOP_WORDS

        # (pattern, replacement) pairs applied in order by clean_term().
        self.clean_patterns = [
            (r'\s+', ' '),           # Multiple spaces
            (r'^\s+|\s+$', ''),      # Leading/trailing spaces
            (r'["\']', ''),          # Quotes
            (r'\([^)]*\)', ''),      # Parentheses content
            (r'\[[^\]]*\]', ''),     # Brackets content
            (r'[0-9]+', ''),         # Numbers (optional)
        ]

    def clean_term(self, term: str, remove_numbers: bool = False) -> str:
        """
        Clean a single term.

        Args:
            term: Term to clean
            remove_numbers: Also strip digit runs when True

        Returns:
            Cleaned term ("" for empty input)
        """
        if not term:
            return ""

        cleaned = term.strip()

        # Apply patterns in declared order.
        for pattern, replacement in self.clean_patterns:
            # The digit pattern is opt-in; skip it unless requested.
            if pattern == r'[0-9]+' and not remove_numbers:
                continue
            cleaned = re.sub(pattern, replacement, cleaned)

        # Collapse any whitespace left behind by the substitutions.
        return ' '.join(cleaned.split())

    def is_valid_term(self, source: str, target: str) -> bool:
        """
        Check whether a (source, target) term pair is valid.

        A pair is rejected when either side is empty, too short, has
        too many words, consists only of stop words, or is purely
        numeric.

        Args:
            source: Source term
            target: Target term

        Returns:
            True if valid
        """
        # Empty check
        if not source or not target:
            return False

        # Length check
        if len(source) < config.MIN_TERM_LENGTH:
            return False
        if len(target) < config.MIN_TERM_LENGTH:
            return False

        # Word count check
        src_words = source.split()
        tgt_words = target.split()

        if len(src_words) > config.MAX_TERM_WORDS:
            return False
        if len(tgt_words) > config.MAX_TERM_WORDS:
            return False

        # Reject pairs made up entirely of stop words.
        if all(w.lower() in self.en_stopwords for w in src_words):
            return False
        if all(w.lower() in self.tr_stopwords for w in tgt_words):
            return False

        # Reject purely numeric pairs.
        if source.replace(' ', '').isdigit():
            return False
        if target.replace(' ', '').isdigit():
            return False

        return True

    def remove_stopwords(self, term: str, lang: str = 'en') -> str:
        """
        Remove stop words from a term.

        Args:
            term: Term to process
            lang: Language ('en' or 'tr'); anything other than 'en'
                  uses the Turkish stop-word set

        Returns:
            Term without stop words
        """
        stopwords = self.en_stopwords if lang == 'en' else self.tr_stopwords

        words = term.split()
        filtered = [w for w in words if w.lower() not in stopwords]

        return ' '.join(filtered)

    def filter_by_frequency(self, terms: List[Tuple[str, str, float]],
                            min_freq: Optional[int] = None) -> List[Dict]:
        """
        Count case-insensitive occurrences of term pairs and keep those
        seen at least min_freq times.

        Args:
            terms: List of (source, target, score) tuples
            min_freq: Minimum frequency (defaults to config.MIN_FREQUENCY)

        Returns:
            Filtered term dicts, most frequent first, each with keys
            'source', 'target', 'frequency', 'confidence'
        """
        # Explicit None check so a caller-supplied 0 is honoured
        # (the old `min_freq or ...` idiom silently discarded 0).
        if min_freq is None:
            min_freq = config.MIN_FREQUENCY

        # Count occurrences; keys are lowercased for case-insensitivity.
        counter = Counter()
        scores = {}

        for src, tgt, score in terms:
            key = (src.lower(), tgt.lower())
            counter[key] += 1

            # Keep the maximum score seen for this pair.
            if key not in scores or score > scores[key]:
                scores[key] = score

        # Filter by frequency, preserving most_common() order.
        result = []
        for (src, tgt), freq in counter.most_common():
            if freq >= min_freq:
                result.append({
                    'source': src,
                    'target': tgt,
                    'frequency': freq,
                    'confidence': scores[(src, tgt)]
                })

        return result

    def filter_by_confidence(self, terms: List[Dict],
                             min_conf: Optional[float] = None) -> List[Dict]:
        """
        Filter terms by confidence score.

        Args:
            terms: List of term dicts
            min_conf: Minimum confidence (defaults to config.MIN_CONFIDENCE)

        Returns:
            Terms whose 'confidence' is >= min_conf (missing key counts as 0)
        """
        # Explicit None check so a caller-supplied 0.0 is honoured
        # (the old `min_conf or ...` idiom silently discarded 0.0).
        if min_conf is None:
            min_conf = config.MIN_CONFIDENCE

        return [t for t in terms if t.get('confidence', 0) >= min_conf]

    def deduplicate(self, terms: List[Dict]) -> List[Dict]:
        """
        Remove duplicate terms (same lowercased source), keeping the
        entry with the highest frequency; ties keep the first seen.

        Args:
            terms: List of term dicts

        Returns:
            Deduplicated terms
        """
        seen = {}

        for term in terms:
            key = term['source'].lower()

            # First occurrence wins unless a later one is strictly more frequent.
            if key not in seen or term['frequency'] > seen[key]['frequency']:
                seen[key] = term

        return list(seen.values())

    def normalize_case(self, terms: List[Dict],
                       lowercase: bool = True) -> List[Dict]:
        """
        Normalize term case.

        Args:
            terms: List of term dicts
            lowercase: Convert 'source'/'target' to lowercase when True

        Returns:
            New list of (shallow-copied) term dicts; input is not mutated
        """
        result = []

        for term in terms:
            new_term = term.copy()

            if lowercase:
                new_term['source'] = term['source'].lower()
                new_term['target'] = term['target'].lower()

            result.append(new_term)

        return result

    def clean_batch(self, terms: List[Tuple[str, str, float]]) -> List[Dict]:
        """
        Clean and filter terms as a batch.

        Pipeline: per-term cleaning -> validity check -> frequency
        filter -> confidence filter -> deduplication -> sort.

        Args:
            terms: List of (source, target, score) tuples

        Returns:
            Cleaned and filtered term dicts, sorted by frequency desc
        """
        # Step 1: Clean individual terms and drop invalid pairs.
        cleaned = []
        for src, tgt, score in terms:
            clean_src = self.clean_term(src)
            clean_tgt = self.clean_term(tgt)

            if self.is_valid_term(clean_src, clean_tgt):
                cleaned.append((clean_src, clean_tgt, score))

        # Step 2: Filter by frequency
        counted = self.filter_by_frequency(cleaned)

        # Step 3: Filter by confidence
        confident = self.filter_by_confidence(counted)

        # Step 4: Deduplicate
        unique = self.deduplicate(confident)

        # Step 5: Sort by frequency
        unique.sort(key=lambda x: x['frequency'], reverse=True)

        return unique


# Test
# Manual smoke test: run the full clean_batch() pipeline on a small
# sample and print the before/after term lists.
if __name__ == "__main__":
    cleaner = TermCleaner()

    # (source, target, score) samples; the last three exercise the filters.
    test_terms = [
        ("hydraulic pump", "hidrolik pompa", 0.95),
        ("Hydraulic Pump", "Hidrolik Pompa", 0.92),
        ("the pump", "pompa", 0.80),
        ("safety valve", "emniyet valfi", 0.88),
        ("safety valve", "emniyet valfi", 0.90),
        ("a", "bir", 0.99),    # stop word only
        ("123", "456", 0.99),  # purely numeric
        ("", "", 0.99),        # empty pair
    ]

    print("Test Terms:")
    for src, tgt, score in test_terms:
        print(f"  {src} -> {tgt} (score: {score})")

    result = cleaner.clean_batch(test_terms)

    print(f"\nCleaned Terms ({len(result)}):")
    for term in result:
        line = (f"  {term['source']} -> {term['target']} "
                f"(freq: {term['frequency']}, conf: {term['confidence']:.2f})")
        print(line)
