Python fix a broken encoding

跟風遠走 提交于 2019-12-07 21:38:29

问题


I have a small icecast2 home server with Django playlist management. I also have a lot of mp3's with broken encodings. First, I tried to find an encoding-repair tool for Python, but I haven't found anything that works for me (python-ftfy and nltk don't support the input I have — nltk does not accept unicode input).

I use the beets pip package as a Swiss-army knife for parsing media tags; it's quite simple, and I think it's almost enough for most cases.

For character-set detection I use chardet, but it has some issues with short strings, so I apply some coercion tweaks to the encodings it encounters. I presume that if the encoding is wrong, it's wrong in all tags, so I collect all the used encodings first.

class MostFrequentEncoding(dict):
    """Tally of character encodings chardet detects across string values.

    Maps encoding name -> ``{'confidence': <summed chardet confidence>,
    'total': <number of strings detected as that encoding>}``.
    """

    def from_attrs(self, obj):
        """Feed every attribute value of *obj* into the tally."""
        for attr in dir(obj):
            val = getattr(obj, attr)
            self.feed(val)

    def feed(self, obj):
        """Record the chardet guess for *obj* if it is a non-empty string.

        NOTE: ``basestring`` is Python 2 only; on Python 3 this would be
        ``(str, bytes)``.
        """
        if obj and isinstance(obj, basestring):
            guess = chardet.detect(obj)
            encoding = guess['encoding']

            # setdefault already covers the missing-key case, so the extra
            # `if encoding not in self` test the original did is redundant;
            # it also returns the stats dict, avoiding repeated lookups.
            stats = self.setdefault(encoding, {'confidence': 0.0, 'total': 0})
            stats['confidence'] += guess['confidence']
            stats['total'] += 1

    def encodings(self):
        """Return encoding names sorted by frequency, most frequent first."""
        return sorted(self, key=lambda enc: self[enc]['total'], reverse=True)

Here are the tweaks:

# Manual tie-break table for ambiguous chardet results: when every encoding
# in the key tuple was detected somewhere among the tags, the confidence of
# the encoding named in the value dict is lowered by the given penalty, so
# the competing encoding wins the final selection in fix_encoding().
charset_coercing = {
    ('MacCyrillic', 'windows-1251'): {'MacCyrillic': -0.1},
}

That means that if MacCyrillic and windows-1251 are both candidates at the same time, we should prefer windows-1251.

def fix_encoding(src, possible_encodings):
    """Try to repair a mis-decoded tag string.

    Recovers the raw byte sequence behind *src*, then tries to decode it
    with chardet's own guess first and the file-wide candidate encodings
    after, keeping the decoding chardet is most confident about (after
    applying the manual ``charset_coercing`` penalties).

    :param src: the (possibly broken) tag value; non-strings pass through
        unchanged. Python 2 semantics: may be ``str`` or ``unicode``.
    :param possible_encodings: encodings collected from all tags, most
        frequent first (see ``MostFrequentEncoding.encodings()``).
    :return: the repaired string, or *src* unchanged when it is not a
        string, is empty, or no candidate encoding could decode it.
    """
    if not isinstance(src, basestring) or not src:
        return src

    guess = chardet.detect(src)
    first_encoding = guess['encoding']

    # Copy the candidates and move chardet's own guess to the front: if
    # its confidence turns out to be high we trust it without question.
    encodings = list(possible_encodings)
    if first_encoding in encodings:
        encodings.remove(first_encoding)
    encodings.insert(0, first_encoding)
    encodings_set = set(encodings)

    tested_encodings = {k: {'string': '', 'confidence': -1.0} for k in encodings}

    # Recover the raw bytes the broken string came from.
    try:
        lat = src.encode('latin-1') if isinstance(src, unicode) else src
    except UnicodeEncodeError:
        # Not a latin-1 round-trip; fall back to the utf-8 byte form.
        lat = src.encode('utf-8')

    while encodings:
        candidate = encodings.pop(0)
        if not candidate:  # chardet returns None when it has no guess
            continue

        # setdefault handles the (unlikely) missing-key case by itself and
        # hands back the entry, replacing the original's redundant
        # `if not candidate in tested_encodings` membership test.
        entry = tested_encodings.setdefault(
            candidate, {'string': '', 'confidence': -1.0})

        try:
            fixed_string = lat.decode(candidate)
        except UnicodeDecodeError:
            continue

        # Re-detect on the repaired text: confidence tends to be higher
        # when the candidate encoding was the right one.
        entry['string'] = fixed_string
        entry['confidence'] = chardet.detect(fixed_string)['confidence']

    # Apply the manual tie-break penalties when both encodings of a
    # coercion rule were among the candidates.
    for subset, coercing_encodings in charset_coercing.items():
        if set(subset).issubset(encodings_set):
            for enc, penalty in coercing_encodings.items():
                tested_encodings[enc]['confidence'] += penalty

    # Trust chardet's first guess outright when it is near-certain.
    result = tested_encodings.get(first_encoding)
    if result and result['confidence'] >= 0.99:
        return result['string']

    best = max(tested_encodings, key=lambda enc: tested_encodings[enc]['confidence'])
    if tested_encodings[best]['confidence'] < 0.0:
        # Every candidate failed to decode; the original returned '',
        # silently destroying the tag. Hand back the input instead.
        return src
    return tested_encodings[best]['string']

Media file parsing:

def extract_tags(media_file):
    """Read media tags from *media_file* and repair their encodings.

    :param media_file: path to a media file readable by beets' MediaFile.
    :return: dict of tag name -> repaired value; cover art (if any) is
        stored under ``'art'`` as ``{'data': bytes, 'mime': type}``.
        An unreadable file yields an empty dict.
    """
    try:
        mf = MediaFile(media_file)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; an unreadable file stays a best-effort {}.
        return {}

    # Collect every encoding seen across the tags so fix_encoding() can
    # rank candidates by how often they occur in this file.
    mfe = MostFrequentEncoding()
    mfe.from_attrs(mf)

    encodings = mfe.encodings()
    tags = {}

    # Internal attributes and binary payloads that must not be treated as tags.
    skipped = ('__dict__', '__doc__', '__module__', '__weakref__', 'mgfile', 'art')

    for attr in sorted(dir(mf)):
        val = getattr(mf, attr)
        if not val or callable(val) or attr in skipped:
            continue

        fixed = fix_encoding(val, encodings)
        tags[attr] = remove_extra_spaces(fixed) if isinstance(fixed, basestring) else fixed

    # Cover art is binary: keep it out of the encoding pipeline entirely.
    if mf.art:
        tags['art'] = {'data': mf.art, 'mime': imghdr.what(None, h=mf.art)}

    return tags

And the usage for example:

# Example: parse one file and pretty-print the repaired tags.
f = '/media/Media/Music/Jason Becker/Jason Becker - Perpetual Burn/02__1.mp3'
pprint(extract_tags(f))

Here is the full script. It can show ASCII-art covers for albums during parsing.

It seems to work, but is there any maintained, Swiss-army-knife encoding-repair library for Python?

来源:https://stackoverflow.com/questions/14131461/python-fix-a-broken-encoding

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!