Mirror of https://github.com/fergalmoran/picard.git, synced 2026-01-09 10:03:59 +00:00
Removed old code from mutagenext (now in mutagen). Added test for APEv2 files.
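In practice the switch means Picard opens these formats through mutagen's own modules instead of the bundled mutagenext copies. A minimal sketch of that usage, assuming a mutagen release that already ships mutagen.optimfrog and mutagen.asf; the file names are placeholders, not files in the repository:

# Illustrative only: the mutagen modules this commit switches to.
# "example.ofr" / "example.wma" are placeholder paths.
import mutagen.optimfrog
import mutagen.asf

ofr = mutagen.optimfrog.OptimFROG("example.ofr")  # OptimFROG stream with APEv2 tags
print ofr.info.pprint()                           # duration / sample-rate summary

wma = mutagen.asf.ASF("example.wma")              # replaces the bundled mutagenext ASF reader
print wma.tags.pprint()                           # "key=value" dump of the ASF attributes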
@@ -21,8 +21,9 @@ import mutagen.apev2
import mutagen.monkeysaudio
import mutagen.musepack
import mutagen.wavpack
import mutagenext.optimfrog
import mutagen.optimfrog
from picard.file import File
from picard.metadata import Metadata
from picard.util import encode_filename, sanitize_date

class APEv2File(File):

@@ -33,12 +34,20 @@ class APEv2File(File):
        "Album Artist": "albumartist",
        "MixArtist": "remixer",
        "Weblink": "website",
        "DiscSubtitle": "discsubtitle",
        "BPM": "bpm",
        "ISRC": "isrc",
        "CatalogNumber": "catalognumber",
        "BarCode": "barcode",
        "EncodedBy": "encodedby",
        "MUSICBRAINZ_ALBUMSTATUS": "releasestatus",
        "MUSICBRAINZ_ALBUMTYPE": "releasetype",
    }
    __rtranslate = dict([(v, k) for k, v in __translate.iteritems()])

    def _load(self):
        file = self._File(encode_filename(self.filename))
        metadata = Metadata()
        if file.tags:
            for origname, values in file.tags.items():
                for value in values:

@@ -62,7 +71,8 @@ class APEv2File(File):
                        name = self.__translate[name]
                    else:
                        name = name.lower()
                    self.metadata.add(name, value)
                    metadata.add(name, value)
        self.metadata.update(metadata)
        self._info(file)

    def save(self):

@@ -91,14 +101,14 @@ class APEv2File(File):
                    value = '%s/%s' % (value, self.metadata['totaldiscs'])
                elif name in ('totaltracks', 'totaldiscs'):
                    continue
                elif name == "albumartist":
                    name = "Album Artist"
                # "performer:Piano=Joe Barr" => "Performer=Joe Barr (Piano)"
                elif name.startswith('performer:') or name.startswith('comment:'):
                    name, desc = name.split(':', 1)
                    name = name.title()
                    if desc:
                        value += ' (%s)' % desc
                elif name in self.__rtranslate:
                    name = self.__rtranslate[name]
                else:
                    name = name.title()
                temp.setdefault(name, []).append(value)

@@ -128,7 +138,7 @@ class OptimFROGFile(APEv2File):
    """OptimFROG file."""
    EXTENSIONS = [".ofr", ".ofs"]
    NAME = "OptimFROG"
    _File = mutagenext.optimfrog.OptimFROG
    _File = mutagen.optimfrog.OptimFROG
    def _info(self, file):
        super(OptimFROGFile, self)._info(file)
        if self.filename.lower().endswith(".ofs"):
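The save() hunk above flattens Picard's qualified tag names into plain APEv2 keys, as in the inline comment ("performer:Piano=Joe Barr" => "Performer=Joe Barr (Piano)"). A standalone sketch of that same transformation, outside the class; the rtranslate dict here is a hypothetical stand-in for a subset of the private __rtranslate map:

# Sketch of the name mapping used in APEv2File.save(); not the class itself.
rtranslate = {"remixer": "MixArtist", "website": "Weblink"}  # illustrative subset

def apev2_key(name, value):
    """Map a Picard tag name/value pair to an APEv2 key/value pair."""
    if name.startswith('performer:') or name.startswith('comment:'):
        name, desc = name.split(':', 1)
        name = name.title()
        if desc:
            value += ' (%s)' % desc
    elif name in rtranslate:
        name = rtranslate[name]
    else:
        name = name.title()
    return name, value

print apev2_key('performer:Piano', 'Joe Barr')   # ('Performer', 'Joe Barr (Piano)')
print apev2_key('remixer', 'Foo')                # ('MixArtist', 'Foo')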
@@ -19,7 +19,7 @@

from picard.file import File
from picard.util import encode_filename
from picard.formats.mutagenext.asf import ASF
from mutagen.asf import ASF

class ASFFile(File):
    """ASF (WMA) metadata reader/writer"""
@@ -1,636 +0,0 @@
# -*- coding: utf-8 -*-

# ASF reader/tagger
#
# Copyright 2006 Lukáš Lalinský <lalinsky@gmail.com>
# Copyright 2005-2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id$

"""Read and write metadata to Window Media Audio files.
"""

__all__ = ["ASF", "Open"]

import struct
from mutagen import FileType, Metadata
from mutagen._util import insert_bytes, delete_bytes, DictMixin


class error(IOError): pass
class ASFError(error): pass
class ASFHeaderError(error): pass


class ASFInfo(object):
    """ASF stream information."""

    def __init__(self):
        self.length = 0.0
        self.sample_rate = 0
        self.bitrate = 0
        self.channels = 0

    def pprint(self):
        s = "Windows Media Audio %d bps, %s Hz, %d channels, %.2f seconds" % (
            self.bitrate, self.sample_rate, self.channels, self.length)
        return s


class ASFTags(list, DictMixin):
    """Dictionary containing ASF attributes."""

    def pprint(self):
        return "\n".join(["%s=%s" % (k, v) for k, v in self])

    def __getitem__(self, key):
        """A list of values for the key.

        This is a copy, so comment['title'].append('a title') will not
        work.

        """
        values = [value for (k, value) in self if k == key]
        if not values: raise KeyError, key
        else: return values

    def __delitem__(self, key):
        """Delete all values associated with the key."""
        to_delete = filter(lambda x: x[0] == key, self)
        if not to_delete: raise KeyError, key
        else: map(self.remove, to_delete)

    def __contains__(self, key):
        """Return true if the key has any values."""
        for k, value in self:
            if k == key: return True
        else: return False

    def __setitem__(self, key, values):
        """Set a key's value or values.

        Setting a value overwrites all old ones. The value may be a
        list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
        string.

        """
        if not isinstance(values, list):
            values = [values]
        try: del(self[key])
        except KeyError: pass
        for value in values:
            if key in _standard_attribute_names:
                value = unicode(value)
            elif not isinstance(value, ASFBaseAttribute):
                if isinstance(value, unicode):
                    value = ASFUnicodeAttribute(value)
                elif isinstance(value, bool):
                    value = ASFBoolAttribute(value)
                elif isinstance(value, int):
                    value = ASFDWordAttribute(value)
                elif isinstance(value, long):
                    value = ASFQWordAttribute(value)
            self.append((key, value))

    def keys(self):
        """Return all keys in the comment."""
        return self and set(zip(*self)[0])

    def as_dict(self):
        """Return a copy of the comment data in a real dict."""
        d = {}
        for key, value in self:
            d.setdefault(key, []).append(value)
        return d


class ASFBaseAttribute(object):
    """Generic attribute."""
    TYPE = None

    def __init__(self, value=None, data=None, language=None,
                 stream=None, **kwargs):
        self.language = language
        self.stream = stream
        if data:
            self.value = self.parse(data, **kwargs)
        else:
            self.value = value

    def __repr__(self):
        name = "%s(%r" % (type(self).__name__, self.value)
        if self.language:
            name += ", language=%d" % self.language
        if self.stream:
            name += ", stream=%d" % self.stream
        name += ")"
        return name

    def render(self, name):
        name = name.encode("utf-16-le") + "\x00\x00"
        data = self._render()
        return (struct.pack("<H", len(name)) + name +
                struct.pack("<HH", self.TYPE, len(data)) + data)

    def render_m(self, name):
        name = name.encode("utf-16-le") + "\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct.pack("<HHHHI", 0, self.stream or 0, len(name),
                            self.TYPE, len(data)) + name + data)

    def render_ml(self, name):
        name = name.encode("utf-16-le") + "\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct.pack("<HHHHI", self.language or 0, self.stream or 0,
                            len(name), self.TYPE, len(data)) + name + data)


class ASFUnicodeAttribute(ASFBaseAttribute):
    """Unicode string attribute."""
    TYPE = 0x0000

    def parse(self, data):
        return data.decode("utf-16").strip("\x00")

    def _render(self):
        return self.value.encode("utf-16-le") + "\x00\x00"

    def __str__(self):
        return self.value

    def __cmp__(self, other):
        return cmp(unicode(self), other)


class ASFByteArrayAttribute(ASFBaseAttribute):
    """Byte array attribute."""
    TYPE = 0x0001

    def parse(self, data):
        return data

    def _render(self):
        return self.value

    def __str__(self):
        return "[binary data (%s bytes)]" % len(self.value)

    def __cmp__(self, other):
        return cmp(str(self), other)


class ASFBoolAttribute(ASFBaseAttribute):
    """Bool attribute."""
    TYPE = 0x0002

    def parse(self, data, dword=True):
        if dword:
            return struct.unpack("<I", data)[0] == 1
        else:
            return struct.unpack("<H", data)[0] == 1

    def _render(self, dword=True):
        if dword:
            return struct.pack("<I", int(self.value))
        else:
            return struct.pack("<H", int(self.value))

    def __bool__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    def __cmp__(self, other):
        return cmp(bool(self), other)


class ASFDWordAttribute(ASFBaseAttribute):
    """DWORD attribute."""
    TYPE = 0x0003

    def parse(self, data):
        return struct.unpack("<L", data)[0]

    def _render(self):
        return struct.pack("<L", self.value)

    def __int__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    def __cmp__(self, other):
        return cmp(int(self), other)


class ASFQWordAttribute(ASFBaseAttribute):
    """QWORD attribute."""
    TYPE = 0x0004

    def parse(self, data):
        return struct.unpack("<Q", data)[0]

    def _render(self):
        return struct.pack("<Q", self.value)

    def __int__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    def __cmp__(self, other):
        return cmp(int(self), other)


class ASFWordAttribute(ASFBaseAttribute):
    """WORD attribute."""
    TYPE = 0x0005

    def parse(self, data):
        return struct.unpack("<H", data)[0]

    def _render(self):
        return struct.pack("<H", self.value)

    def __int__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    def __cmp__(self, other):
        return cmp(int(self), other)


class ASFGUIDAttribute(ASFBaseAttribute):
    """GUID attribute."""
    TYPE = 0x0006

    def parse(self, data):
        return data

    def _render(self):
        return self.value

    def __str__(self):
        return self.value

    def __cmp__(self, other):
        return cmp(str(self), other)


UNICODE = ASFUnicodeAttribute.TYPE
BYTEARRAY = ASFByteArrayAttribute.TYPE
BOOL = ASFBoolAttribute.TYPE
DWORD = ASFDWordAttribute.TYPE
QWORD = ASFQWordAttribute.TYPE
WORD = ASFWordAttribute.TYPE
GUID = ASFGUIDAttribute.TYPE


def ASFValue(value, kind, **kwargs):
    for t, c in _attribute_types.items():
        if kind == t:
            return c(value=value, **kwargs)
    raise ValueError("Unknown value type")


_attribute_types = {
    ASFUnicodeAttribute.TYPE: ASFUnicodeAttribute,
    ASFByteArrayAttribute.TYPE: ASFByteArrayAttribute,
    ASFBoolAttribute.TYPE: ASFBoolAttribute,
    ASFDWordAttribute.TYPE: ASFDWordAttribute,
    ASFQWordAttribute.TYPE: ASFQWordAttribute,
    ASFWordAttribute.TYPE: ASFWordAttribute,
    ASFGUIDAttribute.TYPE: ASFGUIDAttribute,
}


_standard_attribute_names = [
    "Title",
    "Author",
    "Copyright",
    "Description",
    "Rating"
]


class BaseObject(object):
    """Base ASF object."""
    GUID = None

    def parse(self, asf, data, fileobj, size):
        self.data = data

    def render(self, asf):
        data = self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data
        size = len(data)
        return data


class UnknownObject(BaseObject):
    """Unknown ASF object."""
    def __init__(self, guid):
        self.GUID = guid


class HeaderObject(object):
    """ASF header."""
    GUID = "\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"


class ContentDescriptionObject(BaseObject):
    """Content description."""
    GUID = "\x33\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"

    def parse(self, asf, data, fileobj, size):
        super(ContentDescriptionObject, self).parse(asf, data, fileobj, size)
        asf.content_description_obj = self
        lengths = struct.unpack("<HHHHH", data[:10])
        texts = []
        pos = 10
        for length in lengths:
            end = pos + length
            texts.append(data[pos:end].decode("utf-16").strip("\x00"))
            pos = end
        (asf.tags["Title"], asf.tags["Author"], asf.tags["Copyright"],
         asf.tags["Description"], asf.tags["Rating"]) = texts

    def render(self, asf):
        def render_text(name):
            value = asf.tags.get(name, [])
            if value and value[0]:
                return value[0].encode("utf-16-le") + "\x00\x00"
            else:
                return ""
        texts = map(render_text, _standard_attribute_names)
        data = struct.pack("<HHHHH", *map(str.__len__, texts)) + "".join(texts)
        return self.GUID + struct.pack("<Q", 24 + len(data)) + data


class ExtendedContentDescriptionObject(BaseObject):
    """Extended content description."""
    GUID = "\x40\xA4\xD0\xD2\x07\xE3\xD2\x11\x97\xF0\x00\xA0\xC9\x5E\xA8\x50"

    def parse(self, asf, data, fileobj, size):
        super(ExtendedContentDescriptionObject, self).parse(asf, data, fileobj, size)
        asf.extended_content_description_obj = self
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in range(num_attributes):
            name_length, = struct.unpack("<H", data[pos:pos+2])
            pos += 2
            name = data[pos:pos+name_length].decode("utf-16").strip("\x00")
            pos += name_length
            value_type, value_length = struct.unpack("<HH", data[pos:pos+4])
            pos += 4
            value = data[pos:pos+value_length]
            pos += value_length
            attr = _attribute_types[value_type](data=value)
            asf.tags.append((name, attr))

    def render(self, asf):
        attrs = asf.to_extended_content_description.items()
        data = "".join([attr.render(name) for (name, attr) in attrs])
        data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
        return self.GUID + data


class FilePropertiesObject(BaseObject):
    """File properties."""
    GUID = "\xA1\xDC\xAB\x8C\x47\xA9\xCF\x11\x8E\xE4\x00\xC0\x0C\x20\x53\x65"

    def parse(self, asf, data, fileobj, size):
        super(FilePropertiesObject, self).parse(asf, data, fileobj, size)
        length, _, preroll = struct.unpack("<QQQ", data[40:64])
        asf.info.length = length / 10000000.0 - preroll / 1000.0


class StreamPropertiesObject(BaseObject):
    """Stream properties."""
    GUID = "\x91\x07\xDC\xB7\xB7\xA9\xCF\x11\x8E\xE6\x00\xC0\x0C\x20\x53\x65"

    def parse(self, asf, data, fileobj, size):
        super(StreamPropertiesObject, self).parse(asf, data, fileobj, size)
        channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66])
        asf.info.channels = channels
        asf.info.sample_rate = sample_rate
        asf.info.bitrate = bitrate * 8


class HeaderExtensionObject(BaseObject):
    """Header extension."""
    GUID = "\xb5\x03\xbf_.\xa9\xcf\x11\x8e\xe3\x00\xc0\x0c Se"

    def parse(self, asf, data, fileobj, size):
        super(HeaderExtensionObject, self).parse(asf, data, fileobj, size)
        asf.header_extension_obj = self
        datasize, = struct.unpack("<I", data[18:22])
        datapos = 0
        self.objects = []
        while datapos < datasize:
            guid, size = struct.unpack("<16sQ", data[22+datapos:22+datapos+24])
            if guid in _object_types:
                obj = _object_types[guid]()
            else:
                obj = UnknownObject(guid)
            obj.parse(asf, data[22+datapos+24:22+datapos+size], fileobj, size)
            self.objects.append(obj)
            datapos += size

    def render(self, asf):
        data = "".join([obj.render(asf) for obj in self.objects])
        return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
                "\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
                "\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
                "\x06\x00" + struct.pack("<I", len(data)) + data)


class MetadataObject(BaseObject):
    """Metadata description."""
    GUID = "\xea\xcb\xf8\xc5\xaf[wH\x84g\xaa\x8cD\xfaL\xca"

    def parse(self, asf, data, fileobj, size):
        super(MetadataObject, self).parse(asf, data, fileobj, size)
        asf.metadata_obj = self
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in range(num_attributes):
            (reserved, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos+12])
            pos += 12
            name = data[pos:pos+name_length].decode("utf-16").strip("\x00")
            pos += name_length
            value = data[pos:pos+value_length]
            pos += value_length
            args = {'data': value, 'stream': stream}
            if value_type == 2:
                args['dword'] = False
            attr = _attribute_types[value_type](**args)
            asf.tags.append((name, attr))

    def render(self, asf):
        attrs = asf.to_metadata.items()
        data = "".join([attr.render_m(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)


class MetadataLibraryObject(BaseObject):
    """Metadata library description."""
    GUID = "\x94\x1c#D\x98\x94\xd1I\xa1A\x1d\x13NEpT"

    def parse(self, asf, data, fileobj, size):
        super(MetadataLibraryObject, self).parse(asf, data, fileobj, size)
        asf.metadata_library_obj = self
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in range(num_attributes):
            (language, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos+12])
            pos += 12
            name = data[pos:pos+name_length].decode("utf-16").strip("\x00")
            pos += name_length
            value = data[pos:pos+value_length]
            pos += value_length
            args = {'data': value, 'language': language, 'stream': stream}
            if value_type == 2:
                args['dword'] = False
            attr = _attribute_types[value_type](**args)
            asf.tags.append((name, attr))

    def render(self, asf):
        attrs = asf.to_metadata_library
        data = "".join([attr.render_ml(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)


_object_types = {
    ExtendedContentDescriptionObject.GUID: ExtendedContentDescriptionObject,
    ContentDescriptionObject.GUID: ContentDescriptionObject,
    FilePropertiesObject.GUID: FilePropertiesObject,
    StreamPropertiesObject.GUID: StreamPropertiesObject,
    HeaderExtensionObject.GUID: HeaderExtensionObject,
    MetadataLibraryObject.GUID: MetadataLibraryObject,
    MetadataObject.GUID: MetadataObject,
}


class ASF(FileType):
    """An ASF file, probably containing WMA or WMV."""

    def load(self, filename):
        self.filename = filename
        fileobj = file(filename, "rb")
        try:
            self.size = 0
            self.size1 = 0
            self.size2 = 0
            self.offset1 = 0
            self.offset2 = 0
            self.num_objects = 0
            self.info = ASFInfo()
            self.tags = ASFTags()
            self.__read_file(fileobj)
        finally:
            fileobj.close()

    def save(self):
        # Move attributes to the right objects
        self.to_extended_content_description = {}
        self.to_metadata = {}
        self.to_metadata_library = []
        for name, value in self.tags:
            if name in _standard_attribute_names:
                continue
            if (value.language is None and value.stream is None and
                name not in self.to_extended_content_description):
                self.to_extended_content_description[name] = value
            elif (value.language is None and value.stream is not None and
                  name not in self.to_metadata):
                self.to_metadata[name] = value
            else:
                self.to_metadata_library.append((name, value))

        # Add missing objects
        if not self.content_description_obj:
            self.content_description_obj = \
                ContentDescriptionObject()
            self.objects.append(self.content_description_obj)
        if not self.extended_content_description_obj:
            self.extended_content_description_obj = \
                ExtendedContentDescriptionObject()
            self.objects.append(self.extended_content_description_obj)
        if not self.header_extension_obj:
            self.header_extension_obj = \
                HeaderExtensionObject()
            self.objects.append(self.header_extension_obj)
        if not self.metadata_obj:
            self.metadata_obj = \
                MetadataObject()
            self.header_extension_obj.objects.append(self.metadata_obj)
        if not self.metadata_library_obj:
            self.metadata_library_obj = \
                MetadataLibraryObject()
            self.header_extension_obj.objects.append(self.metadata_library_obj)

        # Render the header
        data = "".join([obj.render(self) for obj in self.objects])
        data = (HeaderObject.GUID +
                struct.pack("<QL", len(data) + 30, len(self.objects)) +
                "\x01\x02" + data)

        fileobj = file(self.filename, "rb+")
        try:
            size = len(data)
            if size > self.size:
                insert_bytes(fileobj, size - self.size, self.size)
            if size < self.size:
                delete_bytes(fileobj, self.size - size, 0)
            fileobj.seek(0)
            fileobj.write(data)
        finally:
            fileobj.close()

    def __read_file(self, fileobj):
        header = fileobj.read(30)
        if len(header) != 30 or header[:16] != HeaderObject.GUID:
            raise ASFHeaderError, "Not an ASF file."

        self.extended_content_description_obj = None
        self.content_description_obj = None
        self.header_extension_obj = None
        self.metadata_obj = None
        self.metadata_library_obj = None

        self.size, self.num_objects = struct.unpack("<QL", header[16:28])
        self.objects = []
        for i in range(self.num_objects):
            self.__read_object(fileobj)

    def __read_object(self, fileobj):
        guid, size = struct.unpack("<16sQ", fileobj.read(24))
        if guid in _object_types:
            obj = _object_types[guid]()
        else:
            obj = UnknownObject(guid)
        data = fileobj.read(size - 24)
        obj.parse(self, data, fileobj, size)
        self.objects.append(obj)

    def score(filename, fileobj, header):
        return header.startswith(HeaderObject.GUID) * 2
    score = staticmethod(score)

Open = ASF
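For reference, the removed module above (the bundled mutagenext copy of the ASF reader) was driven roughly like this; a sketch based only on the code shown, where ASFValue wraps a value in one of the typed attribute classes and ASFTags.__setitem__ does the same for plain strings. The file name is a placeholder:

# Sketch of how the deleted ASF module was used; illustrative, not an API reference.
asf = ASF("example.wma")                          # parses the header objects listed above
asf.tags["Title"] = u"Foo"                        # standard attribute, stored as unicode
asf.tags["WM/AlbumTitle"] = u"Bar"                # wrapped in ASFUnicodeAttribute by __setitem__
asf.tags["WM/TrackNumber"] = ASFValue(1, DWORD)   # explicitly typed attribute
print asf.tags.pprint()
asf.save()                                        # re-renders and rewrites the ASF header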
@@ -1,63 +0,0 @@
# OptimFROG reader/tagger
#
# Copyright 2006 Lukas Lalinsky <lalinsky@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id$

"""OptimFROG audio streams with APEv2 tags.

OptimFROG is a lossless audio compression program. Its main goal is to
reduce at maximum the size of audio files, while permitting bit
identical restoration for all input. It is similar with the ZIP
compression, but it is highly specialized to compress audio data.

For more information, see http://www.losslessaudio.org/
"""

__all__ = ["OptimFROG", "Open", "delete"]

import struct
from mutagen.apev2 import APEv2File, error

class OptimFROGHeaderError(error): pass

class OptimFROGInfo(object):
    """OptimFROG stream information.

    Attributes:
    channels - number of audio channels
    length - file length in seconds, as a float
    sample_rate - audio sampling rate in Hz
    bitrate -- audio bitrate, in bits per second
    """

    def __init__(self, fileobj):
        header = fileobj.read(76)
        if (len(header) != 76 or not header.startswith("OFR ") or
                struct.unpack("<I", header[4:8])[0] not in [12, 15]):
            raise OptimFROGHeaderError("not an OptimFROG file")
        (total_samples, total_samples_high, sample_type, self.channels,
         self.sample_rate) = struct.unpack("<IHBBI", header[8:20])
        total_samples += total_samples_high << 32
        self.channels += 1
        if self.sample_rate:
            self.length = float(total_samples) / (self.channels *
                                                  self.sample_rate)
        else:
            self.length = 0.0

    def pprint(self):
        return "OptimFROG, %.2f seconds, %d Hz" % (self.length,
                                                   self.sample_rate)

class OptimFROG(APEv2File):
    _Info = OptimFROGInfo

    def score(filename, fileobj, header):
        return (header.startswith("OFR") + filename.endswith(".ofr") +
                filename.endswith(".ofs"))
    score = staticmethod(score)
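The duration computed in OptimFROGInfo.__init__ divides the total interleaved sample count by channels times sample rate; a quick worked check of that formula, with numbers invented for illustration:

# 10 seconds of stereo audio at 44100 Hz, counted as interleaved samples:
total_samples = 2 * 44100 * 10        # 882000
channels = 2
sample_rate = 44100
length = float(total_samples) / (channels * sample_rate)
print length                          # 10.0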
BIN  test/data/test.wv  Normal file
Binary file not shown.
@@ -237,3 +237,52 @@ class MP4VorbisTest(FormatsTest):
        ('lyrics', ['Foo']),
        ('copyright', ['Foo']),
    ]


class WavPackTest(FormatsTest):
    original = os.path.join('test', 'data', 'test.wv')
    tags = [
        ('album', ['Foo', 'Bar']),
        ('album', ['1']),
        ('title', ['Foo']),
        ('artist', ['Foo']),
        ('albumartist', ['Foo']),
        ('date', ['2004-00-00'], ['2004']),
        ('artist', ['Foo']),
        ('composer', ['Foo']),
        ('lyricist', ['Foo']),
        ('conductor', ['Foo']),
        ('performer:guest vocal', ['Foo']),
        ('remixer', ['Foo']),
        ('engineer', ['Foo']),
        ('producer', ['Foo']),
        ('grouping', ['Foo']),
        ('subtitle', ['Foo']),
        ('discsubtitle', ['Foo']),
        ('compilation', ['1']),
        ('comment', ['Foo']),
        ('genre', ['Foo']),
        ('bpm', ['Foo']),
        ('mood', ['Foo']),
        ('isrc', ['Foo']),
        ('copyright', ['Foo']),
        ('lyrics', ['Foo']),
        ('media', ['Foo']),
        ('label', ['Foo']),
        ('catalognumber', ['Foo']),
        ('barcode', ['Foo']),
        ('encodedby', ['Foo']),
        ('albumsort', ['Foo']),
        ('albumartistsort', ['Foo']),
        ('artistsort', ['Foo']),
        ('titlesort', ['Foo']),
        ('musicbrainz_trackid', ['Foo']),
        ('musicbrainz_albumid', ['Foo']),
        ('musicbrainz_artistid', ['Foo']),
        ('musicbrainz_albumartistid', ['Foo']),
        ('musicbrainz_trmid', ['Foo']),
        ('musicbrainz_discid', ['Foo']),
        ('musicip_puid', ['Foo']),
        ('releasestatus', ['Foo']),
        ('releasetype', ['Foo']),
    ]
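The new WavPack test data exercises the APEv2 mapping above, since WavPack files carry plain APEv2 key/value tags. A minimal sketch of inspecting such a file with mutagen; the path is a placeholder, not the bundled test/data/test.wv:

# Illustrative only: WavPack tags are the raw APEv2 keys that the
# translate/rtranslate maps in apev2.py convert to and from Picard names.
import mutagen.wavpack

wv = mutagen.wavpack.WavPack("example.wv")    # placeholder path
print wv.info.pprint()                        # length / sample rate
for key, value in (wv.tags or {}).items():    # raw APEv2 keys, e.g. "Album Artist"
    print key, value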