author     Scott Jackson <daneren2005@users.noreply.github.com>   2022-04-20 21:22:48 -0700
committer  GitHub <noreply@github.com>                            2022-04-20 21:22:48 -0700
commit     6a513834180a343773424200299d4973b6bbec52 (patch)
tree       b276e7ce259e30043bd82aed26e2b697f44ee958
parent     47cd98ef2b6c9cbe17b64da67221f3238818a384 (diff)
parent     d0221587e9d332c522f7ac429781d4ccaa7f5ce9 (diff)
Merge pull request #1094 from paroj/edge
sync bastp to upstream/vanilla-music
12 files changed, 1099 insertions, 260 deletions
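Note for reviewers: the main caller-facing change in this sync is that BastpUtil.getReplayGainValues() is no longer a static method returning a float[] indexed as {track, album}; it is now an instance method (the LRU cache lives on the BastpUtil object created once in DownloadService.onCreate()) returning a BastpUtil.GainValues with named track/album fields plus a found flag, which replaces the old "adjust == 0" test for untagged files. A minimal caller-side sketch follows; the dB-to-amplitude conversion and the untagged default are added purely for illustration and are not part of this diff.

// Illustrative only -- not part of the patch: how a caller consumes the new GainValues API.
import android.media.MediaPlayer;
import github.daneren2005.dsub.util.tags.BastpUtil;

class ReplayGainSketch {
    private final BastpUtil bastpUtil = new BastpUtil(); // create once; it owns the LRU cache

    void applyGain(MediaPlayer player, String path) {
        BastpUtil.GainValues rg = bastpUtil.getReplayGainValues(path);
        float adjustDb;
        if (rg.found) {
            // Prefer track gain and fall back to album gain; the real DownloadService code
            // additionally honours the "single album" preference shown in the hunk below.
            adjustDb = (rg.track != 0 ? rg.track : rg.album);
        } else {
            adjustDb = -5.0f; // hypothetical default for untagged files
        }
        float scale = (float) Math.pow(10, adjustDb / 20.0); // standard ReplayGain dB -> linear amplitude
        player.setVolume(scale, scale);
    }
}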
diff --git a/app/src/main/java/github/daneren2005/dsub/service/DownloadService.java b/app/src/main/java/github/daneren2005/dsub/service/DownloadService.java index 67a0f36f..dbc1eacc 100644 --- a/app/src/main/java/github/daneren2005/dsub/service/DownloadService.java +++ b/app/src/main/java/github/daneren2005/dsub/service/DownloadService.java @@ -192,6 +192,11 @@ public class DownloadService extends Service { private long subtractNextPosition = 0; private int subtractPosition = 0; + /** + * Reference to precreated BASTP Object + */ + private BastpUtil mBastpUtil; + @Override public void onCreate() { super.onCreate(); @@ -201,6 +206,7 @@ public class DownloadService extends Service { public void run() { Looper.prepare(); + mBastpUtil = new BastpUtil(); mediaPlayer = new MediaPlayer(); mediaPlayer.setWakeMode(DownloadService.this, PowerManager.PARTIAL_WAKE_LOCK); @@ -2645,7 +2651,7 @@ public class DownloadService extends Service { try { float adjust = 0f; if (prefs.getBoolean(Constants.PREFERENCES_KEY_REPLAY_GAIN, false)) { - float[] rg = BastpUtil.getReplayGainValues(downloadFile.getFile().getCanonicalPath()); /* track, album */ + BastpUtil.GainValues rg = mBastpUtil.getReplayGainValues(downloadFile.getFile().getCanonicalPath()); /* track, album */ boolean singleAlbum = false; String replayGainType = prefs.getString(Constants.PREFERENCES_KEY_REPLAY_GAIN_TYPE, "1"); @@ -2690,14 +2696,14 @@ public class DownloadService extends Service { // If playing a single album or no track gain, use album gain - if((singleAlbum || rg[0] == 0) && rg[1] != 0) { - adjust = rg[1]; + if((singleAlbum || rg.album == 0) && rg.track != 0) { + adjust = rg.album; } else { // Otherwise, give priority to track gain - adjust = rg[0]; + adjust = rg.track; } - if (adjust == 0) { + if (!rg.found) { /* No RG value found: decrease volume for untagged song if requested by user */ int untagged = Integer.parseInt(prefs.getString(Constants.PREFERENCES_KEY_REPLAY_GAIN_UNTAGGED, "0")); adjust = (untagged - 150) / 10f; diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/Bastp.java b/app/src/main/java/github/daneren2005/dsub/util/tags/Bastp.java index aa0a2e25..dd456039 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/Bastp.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/Bastp.java @@ -1,5 +1,6 @@ /* * Copyright (C) 2013 Adrian Ulrich <adrian@blinkenlights.ch> + * Copyright (C) 2017 Google Inc. 
* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -24,10 +25,10 @@ import java.util.HashMap; public class Bastp { - + public Bastp() { } - + public HashMap getTags(String fname) { HashMap tags = new HashMap(); try { @@ -44,31 +45,61 @@ public class Bastp { public HashMap getTags(RandomAccessFile s) { HashMap tags = new HashMap(); - byte[] file_ff = new byte[4]; + byte[] file_ff = new byte[12]; try { s.read(file_ff); String magic = new String(file_ff); - if(magic.equals("fLaC")) { + if(magic.substring(0,4).equals("fLaC")) { tags = (new FlacFile()).getTags(s); + tags.put("type", "FLAC"); } - else if(magic.equals("OggS")) { - tags = (new OggFile()).getTags(s); + else if(magic.substring(0,4).equals("OggS")) { + // This may be an Opus OR an Ogg Vorbis file + tags = (new OpusFile()).getTags(s); + if (tags.size() > 0) { + tags.put("type", "OPUS"); + } else { + tags = (new OggFile()).getTags(s); + tags.put("type", "OGG"); + } } else if(file_ff[0] == -1 && file_ff[1] == -5) { /* aka 0xfffb in real languages */ tags = (new LameHeader()).getTags(s); + tags.put("type", "MP3/Lame"); } else if(magic.substring(0,3).equals("ID3")) { tags = (new ID3v2File()).getTags(s); if(tags.containsKey("_hdrlen")) { Long hlen = Long.parseLong( tags.get("_hdrlen").toString(), 10 ); HashMap lameInfo = (new LameHeader()).parseLameHeader(s, hlen); - /* add gain tags if not already present */ + /* add tags from lame header if not already present */ inheritTag("REPLAYGAIN_TRACK_GAIN", lameInfo, tags); inheritTag("REPLAYGAIN_ALBUM_GAIN", lameInfo, tags); + inheritTag("duration", lameInfo, tags); } + tags.put("type", "MP3/ID3v2"); } - tags.put("_magic", magic); + else if(magic.substring(4,8).equals("ftyp") && ( + // see http://www.ftyps.com/ for all MP4 subtypes + magic.substring(8,11).equals("M4A") || // Apple audio + magic.substring(8,11).equals("M4V") || // Apple video + magic.substring(8,12).equals("mp42") || // generic MP4, e.g. FAAC + magic.substring(8,12).equals("isom") || // generic MP4, e.g. ffmpeg + magic.substring(8,12).equals("dash") // IEC 23009-1 data + )) { + tags = (new Mp4File()).getTags(s); + tags.put("type", "MP4"); + } + else if(magic.substring(0,4).equals("MThd")) { + tags = (new RawFile()).getTags(s); + tags.put("type", "MIDI"); + } + else if(file_ff[0] == -1 && (file_ff[1]&0xF0) == 0xF0) { /* aka 0xfff? */ + tags = (new RawFile()).getTags(s); + tags.put("type", "ADTS"); + } + } catch (IOException e) { } diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/BastpUtil.java b/app/src/main/java/github/daneren2005/dsub/util/tags/BastpUtil.java index 7ff517fd..802aa5ad 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/BastpUtil.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/BastpUtil.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Adrian Ulrich <adrian@blinkenlights.ch> + * Copyright (C) 2013-2019 Adrian Ulrich <adrian@blinkenlights.ch> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,59 +14,115 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ - + package github.daneren2005.dsub.util.tags; -import android.support.v4.util.LruCache; +import android.util.LruCache; +import java.util.ArrayList; import java.util.HashMap; -import java.util.Vector; -public final class BastpUtil { - private static final RGLruCache rgCache = new RGLruCache(16); +public class BastpUtil { + /** + * Our global instance cache + */ + private RGLruCache rgCache; + /** + * What we return & cache + */ + public class GainValues { + public float album; + public float track; + public boolean found; + } + /** + * LRU cache for ReplayGain values + */ + private class RGLruCache extends LruCache<String, GainValues> { + public RGLruCache(int size) { + super(size); + } + } + - /** Returns the ReplayGain values of 'path' as <track,album> + public BastpUtil() { + rgCache = new RGLruCache(64); + } + + /** + * Returns a GainValues object for `path' */ - public static float[] getReplayGainValues(String path) { - float[] cached = rgCache.get(path); + public GainValues getReplayGainValues(String path) { + if(path == null) { + // path must not be null + path = "//null\\"; + } + GainValues cached = rgCache.get(path); if(cached == null) { cached = getReplayGainValuesFromFile(path); rgCache.put(path, cached); } return cached; } - - - - /** Parse given file and return track,album replay gain values + + /** + * Parse given file and return track,album replay gain values */ - private static float[] getReplayGainValuesFromFile(String path) { - String[] keys = { "REPLAYGAIN_TRACK_GAIN", "REPLAYGAIN_ALBUM_GAIN" }; - float[] adjust= { 0f , 0f }; + private GainValues getReplayGainValuesFromFile(String path) { HashMap tags = (new Bastp()).getTags(path); - - for (int i=0; i<keys.length; i++) { - String curKey = keys[i]; - if(tags.containsKey(curKey)) { - String rg_raw = (String)((Vector)tags.get(curKey)).get(0); - String rg_numonly = ""; - float rg_float = 0f; - try { - String nums = rg_raw.replaceAll("[^0-9.-]",""); - rg_float = Float.parseFloat(nums); - } catch(Exception e) {} - adjust[i] = rg_float; + GainValues gv = new GainValues(); + + // normal replay gain + if(tags.containsKey("REPLAYGAIN_TRACK_GAIN")) { + gv.track = getFloatFromString((String)((ArrayList)tags.get("REPLAYGAIN_TRACK_GAIN")).get(0)); + gv.found = true; + } + if(tags.containsKey("REPLAYGAIN_ALBUM_GAIN")) { + gv.album = getFloatFromString((String)((ArrayList)tags.get("REPLAYGAIN_ALBUM_GAIN")).get(0)); + gv.found = true; + } + + // R128 replay gain + boolean r128 = false; + if(tags.containsKey("R128_BASTP_BASE_GAIN")) { + // This is the gain found in the opus header which automatically gets applied by the media framework. + // We therefore do not need to include it in our calculation, but we set the 'found' bit and reset + // both album and track gain information as an opus file should only ever contain r128 gain infos. 
+ float base = getFloatFromString((String)((ArrayList)tags.get("R128_BASTP_BASE_GAIN")).get(0)) / 256.0f; + if (base != 0.0f) { + gv.track = 0; + gv.album = 0; + gv.found = true; } } - return adjust; + if(tags.containsKey("R128_TRACK_GAIN")) { + gv.track += getFloatFromString((String)((ArrayList)tags.get("R128_TRACK_GAIN")).get(0)) / 256.0f; + gv.found = true; + r128 = true; + } + if(tags.containsKey("R128_ALBUM_GAIN")) { + gv.album += getFloatFromString((String)((ArrayList)tags.get("R128_ALBUM_GAIN")).get(0)) / 256.0f; + gv.found = true; + r128 = true; + } + + if (r128) { + gv.track += 5.0f; + gv.album += 5.0f; + } + return gv; } - - /** LRU cache for ReplayGain values + + /** + * Parses common replayGain string values */ - private static class RGLruCache extends LruCache<String, float[]> { - public RGLruCache(int size) { - super(size); - } + private float getFloatFromString(String rg_raw) { + float rg_float = 0f; + try { + String nums = rg_raw.replaceAll("[^0-9.-]",""); + rg_float = Float.parseFloat(nums); + } catch(Exception e) {} + return rg_float; } } diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/Common.java b/app/src/main/java/github/daneren2005/dsub/util/tags/Common.java index 51344d90..c0d661f5 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/Common.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/Common.java @@ -21,18 +21,14 @@ package github.daneren2005.dsub.util.tags; import java.io.IOException; import java.io.RandomAccessFile; import java.util.HashMap; -import java.util.Vector; +import java.util.ArrayList; public class Common { - private static final long MAX_PKT_SIZE = 524288; - - public void xdie(String reason) throws IOException { - throw new IOException(reason); - } - - /* - ** Returns a 32bit int from given byte offset in LE - */ + private static final int MAX_COMMENT_SIZE = 512; + + /** + * Returns a 32bit int from given byte offset in LE + */ public int b2le32(byte[] b, int off) { int r = 0; for(int i=0; i<4; i++) { @@ -40,7 +36,17 @@ public class Common { } return r; } - + + /** + * Same as b2le32 but reads from a RandomAccessFile instead of a buffer + */ + public int raf2le32(RandomAccessFile fh, long off) throws IOException { + byte[] scratch = new byte[4]; + fh.seek(off); + fh.read(scratch); + return b2le32(scratch, 0); + } + public int b2be32(byte[] b, int off) { return swap32(b2le32(b, off)); } @@ -48,63 +54,102 @@ public class Common { public int swap32(int i) { return((i&0xff)<<24)+((i&0xff00)<<8)+((i&0xff0000)>>8)+((i>>24)&0xff); } - - /* - ** convert 'byte' value into unsigned int - */ + + /** + * Returns a 16bit int from given byte offset in LE + */ + public int b2le16(byte[] b, int off) { + return ( b2u(b[off]) | b2u(b[off+1]) << 8 ); + } + + /** + * convert 'byte' value into unsigned int + */ public int b2u(byte x) { return (x & 0xFF); } - /* - ** Printout debug message to STDOUT - */ + /** + * Printout debug message to STDOUT + */ public void debug(String s) { System.out.println("DBUG "+s); } - - public HashMap parse_vorbis_comment(RandomAccessFile s, long offset, long payload_len) throws IOException { + + /** + * Throws an exception, killing the parser + */ + public void xdie(String reason) throws IOException { + throw new IOException(reason); + } + + public HashMap parse_vorbis_comment(RandomAccessFile fh, PageInfo.PageParser pp, long offset, long payload_len) throws IOException { HashMap tags = new HashMap(); - int comments = 0; // number of found comments - int xoff = 0; // offset within 'scratch' - int 
can_read = (int)(payload_len > MAX_PKT_SIZE ? MAX_PKT_SIZE : payload_len); - byte[] scratch = new byte[can_read]; - - // seek to given position and slurp in the payload - s.seek(offset); - s.read(scratch); - - // skip vendor string in format: [LEN][VENDOR_STRING] - xoff += 4 + b2le32(scratch, xoff); // 4 = LEN = 32bit int - comments = b2le32(scratch, xoff); - xoff += 4; - - // debug("comments count = "+comments); - for(int i=0; i<comments; i++) { - - int clen = (int)b2le32(scratch, xoff); - xoff += 4+clen; - - if(xoff > scratch.length) - xdie("string out of bounds"); - - String tag_raw = new String(scratch, xoff-clen, clen); - String[] tag_vec = tag_raw.split("=",2); - String tag_key = tag_vec[0].toUpperCase(); - - addTagEntry(tags, tag_key, tag_vec[1]); + long last_byte = offset + payload_len; + + // skip vendor string in format: [LEN][VENDOR_STRING] -> 4 = LEN = 32bit int + offset += 4 + raf2le32(fh, offset); + + // we can now read the number of comments in this file, we will also + // adjust offset to point to the value after this 32bit int + int comments = raf2le32(fh, offset); + offset += 4; + + for ( ; comments > 0; comments--) { + int comment_len = raf2le32(fh, offset); + offset += 4; + long can_read = last_byte - offset; // indicates the last byte of this page + int do_read = (int)(can_read > comment_len ? comment_len : can_read); // how much data is readable in this page + + if (do_read >= 3) { + int bsize = (do_read > MAX_COMMENT_SIZE ? MAX_COMMENT_SIZE : do_read); + byte[] data = new byte[bsize]; + fh.seek(offset); + fh.read(data); + String tag_raw = new String(data); + String[] tag_vec = tag_raw.split("=", 2); + String tag_key = tag_vec[0].toUpperCase(); + addTagEntry(tags, tag_key, tag_vec[1]); + } + + // set offset to begin of next tag (OR the end of this page!) 
+ offset += do_read; + + // We hit the end of a stream + // this is most likely due to the fact that we cropped do_read to not cross + // the page boundary -> we must now calculate the position of the next tag + if (offset == last_byte) { + int partial_cruft = comment_len - do_read; // how many bytes we did not read + while(partial_cruft > 0) { + PageInfo pi = pp.parse_stream_page(fh, last_byte); + if (pi.header_len <1 || pi.payload_len < 1) + xdie("Data from callback doesnt make much sense"); + + offset += pi.header_len; // move position behind page header + last_byte = offset + pi.payload_len; // and adjust the last byte to pos + payload_size + + if (offset+partial_cruft < last_byte) { + offset += partial_cruft; // partial data ends in this block: just adjust the ofset + break; + } else { + // this page just contains data from the partial tag -> skip to next one + offset = last_byte; + partial_cruft -= pi.payload_len; + } + } + } } return tags; } - + public void addTagEntry(HashMap tags, String key, String value) { if(tags.containsKey(key)) { - ((Vector)tags.get(key)).add(value); // just add to existing vector + ((ArrayList)tags.get(key)).add(value); // just add to existing vector } else { - Vector vx = new Vector(); - vx.add(value); - tags.put(key, vx); + ArrayList l = new ArrayList<String>(); + l.add(value); + tags.put(key, l); } } diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/FlacFile.java b/app/src/main/java/github/daneren2005/dsub/util/tags/FlacFile.java index de3584d1..ce508d2e 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/FlacFile.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/FlacFile.java @@ -23,7 +23,8 @@ import java.util.HashMap; import java.util.Enumeration; -public class FlacFile extends Common { +public class FlacFile extends Common implements PageInfo.PageParser { + private static final int FLAC_TYPE_STREAMINFO = 0; // Basic info about the stream private static final int FLAC_TYPE_COMMENT = 4; // ID of 'VorbisComment's public FlacFile() { @@ -32,54 +33,87 @@ public class FlacFile extends Common { public HashMap getTags(RandomAccessFile s) throws IOException { int xoff = 4; // skip file magic int retry = 64; - int r[]; + boolean need_infos = true; + boolean need_tags = true; + HashMap infos = new HashMap(); HashMap tags = new HashMap(); - + for(; retry > 0; retry--) { - r = parse_metadata_block(s, xoff); - - if(r[2] == FLAC_TYPE_COMMENT) { - tags = parse_vorbis_comment(s, xoff+r[0], r[1]); - break; + PageInfo pi = parse_stream_page(s, xoff); + if(pi.type == FLAC_TYPE_STREAMINFO) { + infos = parse_streaminfo_block(s, xoff+pi.header_len, pi.payload_len); + need_infos = false; + } + if(pi.type == FLAC_TYPE_COMMENT) { + tags = parse_vorbis_comment(s, this, xoff+pi.header_len, pi.payload_len); + need_tags = false; } - - if(r[3] != 0) + + if(pi.last_page == true || (need_tags == false && need_infos == false)) break; // eof reached - + // else: calculate next offset - xoff += r[0] + r[1]; + xoff += pi.header_len + pi.payload_len; + } + + // Copy duration to final hashmap if found in infoblock + if(infos.containsKey("duration")) { + tags.put("duration", infos.get("duration")); } + return tags; } - /* Parses the metadata block at 'offset' and returns - ** [header_size, payload_size, type, stop_after] - */ - private int[] parse_metadata_block(RandomAccessFile s, long offset) throws IOException { - int[] result = new int[4]; + /** + * Parses the metadata block at 'offset' + */ + public PageInfo parse_stream_page(RandomAccessFile s, 
long offset) throws IOException { byte[] mb_head = new byte[4]; int stop_after = 0; int block_type = 0; int block_size = 0; - + s.seek(offset); if( s.read(mb_head) != 4 ) xdie("failed to read metadata block header"); - + block_size = b2be32(mb_head,0); // read whole header as 32 big endian block_type = (block_size >> 24) & 127; // BIT 1-7 are the type stop_after = (((block_size >> 24) & 128) > 0 ? 1 : 0 ); // BIT 0 indicates the last-block flag block_size = (block_size & 0x00FFFFFF); // byte 1-7 are the size - - // debug("size="+block_size+", type="+block_type+", is_last="+stop_after); - - result[0] = 4; // hardcoded - only returned to be consistent with OGG parser - result[1] = block_size; - result[2] = block_type; - result[3] = stop_after; - - return result; + + PageInfo pi = new PageInfo(); + pi.header_len = 4; // fixed size in flac + pi.payload_len = block_size; + pi.type = block_type; + pi.last_page = (stop_after != 0); + return pi; } - + + /* + ** Returns a hashma with parsed vorbis identification header data + **/ + private HashMap parse_streaminfo_block(RandomAccessFile s, long offset, long pl_len) throws IOException { + HashMap id_hash = new HashMap(); + byte[] buff = new byte[18]; + + if(pl_len >= buff.length) { + s.seek(offset); + s.read(buff); + id_hash.put("blocksize_minimal", (b2be32(buff, 0) >> 16)); + id_hash.put("blocksize_maximal", (b2be32(buff, 0) & 0x0000FFFF)); + id_hash.put("framesize_minimal", (b2be32(buff, 4) >> 8)); + id_hash.put("framesize_maximal", (b2be32(buff, 7) >> 8)); + id_hash.put("sampling_rate", (b2be32(buff, 10) >> 12)); + id_hash.put("channels", ((b2be32(buff, 10) >> 9) & 7) + 1); // 3 bits + id_hash.put("num_samples", b2be32(buff, 14)); // fixme: this is actually 36 bit: the 4 hi bits are discarded due to java + if((Integer)id_hash.get("sampling_rate") > 0) { + int duration = (Integer)id_hash.get("num_samples") / (Integer)id_hash.get("sampling_rate"); + id_hash.put("duration", (int)duration); + } + } + return id_hash; + } + } diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/ID3v2File.java b/app/src/main/java/github/daneren2005/dsub/util/tags/ID3v2File.java index 69668475..4fb7418d 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/ID3v2File.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/ID3v2File.java @@ -1,5 +1,6 @@ /* - * Copyright (C) 2013 Adrian Ulrich <adrian@blinkenlights.ch> + * Copyright (C) 2013-2016 Adrian Ulrich <adrian@blinkenlights.ch> + * Copyright (C) 2017-2018 Google Inc. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,7 +13,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ package github.daneren2005.dsub.util.tags; @@ -21,160 +22,246 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.util.ArrayList; import java.util.HashMap; -import java.util.Locale; +import java.util.Enumeration; + public class ID3v2File extends Common { - private static int ID3_ENC_LATIN = 0x00; - private static int ID3_ENC_UTF16LE = 0x01; - private static int ID3_ENC_UTF16BE = 0x02; - private static int ID3_ENC_UTF8 = 0x03; - + private static final int ID3_ENC_LATIN = 0x00; + private static final int ID3_ENC_UTF16 = 0x01; + private static final int ID3_ENC_UTF16BE = 0x02; + private static final int ID3_ENC_UTF8 = 0x03; + private static final HashMap<String, String> sOggNames; + static { + // ID3v2.3 -> ogg mapping + sOggNames = new HashMap<String, String>(); + sOggNames.put("TIT2", "TITLE"); + sOggNames.put("TALB", "ALBUM"); + sOggNames.put("TPE1", "ARTIST"); + sOggNames.put("TPE2", "ALBUMARTIST"); + sOggNames.put("TYER", "YEAR"); + sOggNames.put("TPOS", "DISCNUMBER"); + sOggNames.put("TRCK", "TRACKNUMBER"); + sOggNames.put("TCON", "GENRE"); + sOggNames.put("TCOM", "COMPOSER"); + // ID3v2.2 3-character names + sOggNames.put("TT2", "TITLE"); + sOggNames.put("TAL", "ALBUM"); + sOggNames.put("TP1", "ARTIST"); + sOggNames.put("TP2", "ALBUMARTIST"); + sOggNames.put("TYE", "YEAR"); + sOggNames.put("TRK", "TRACKNUMBER"); + sOggNames.put("TCO", "GENRE"); + sOggNames.put("TCM", "COMPOSER"); + } + + // Holds a key-value pair + private class TagItem { + String key; + String value; + public TagItem(String key, String value) { + this.key = key; + this.value = value; + } + } + public ID3v2File() { } - + public HashMap getTags(RandomAccessFile s) throws IOException { HashMap tags = new HashMap(); - + final int v2hdr_len = 10; byte[] v2hdr = new byte[v2hdr_len]; - + // read the whole 10 byte header into memory s.seek(0); s.read(v2hdr); - - int id3v = ((b2be32(v2hdr,0))) & 0xFF; // swapped ID3\04 -> ver. ist the first byte - int v3len = ((b2be32(v2hdr,6))); // total size EXCLUDING the this 10 byte header - v3len = ((v3len & 0x7f000000) >> 3) | // for some funky reason, this is encoded as 7*4 bits - ((v3len & 0x007f0000) >> 2) | - ((v3len & 0x00007f00) >> 1) | - ((v3len & 0x0000007f) >> 0) ; - - // debug(">> tag version ID3v2."+id3v); - // debug(">> LEN= "+v3len+" // "+v3len); - + + int v3major = (b2be32(v2hdr, 0)) & 0xFF; // swapped ID3\04 -> ver. ist the first byte + int v3minor = (b2be32(v2hdr, 1)) & 0xFF; // minor version, not used by us. + int v3flags = (b2be32(v2hdr, 2)) & 0xFF; // flags such as extended headers. + int v3len = (b2be32(v2hdr, 6)); // total size EXCLUDING the this 10 byte header + v3len = unsyncsafe(v3len); + + // In 2.4, bit #6 indicates whether or not this file has an extended header + boolean flag_ext_hdr = v3major >= 4 && (v3flags & (1 << 6)) != 0; + + if (flag_ext_hdr) { + // The extended header is at least 6 bytes: + // * 4 byts of size + // * 1 byte numflags + // * 1 byte extended flags + byte[] exthdr = new byte[6]; + long pos = s.getFilePointer(); + s.read(exthdr); + + // we got the length, so we can seek to the header end. 
+ int extlen = (b2be32(exthdr, 0)); + s.seek(pos + extlen); + } + // we should already be at the first frame // so we can start the parsing right now - tags = parse_v3_frames(s, v3len); + tags = parse_v3_frames(s, v3len, v3major); tags.put("_hdrlen", v3len+v2hdr_len); return tags; } - + + /* + ** converts syncsafe integer to Java integer + */ + private int unsyncsafe(int x) { + x = ((x & 0x7f000000) >> 3) | + ((x & 0x007f0000) >> 2) | + ((x & 0x00007f00) >> 1) | + ((x & 0x0000007f) >> 0) ; + return x; + } + + /** + * Calculates the frame length baased on the frame size and the + */ + private int calculateFrameLength(byte[] frame, int offset, int v3major) { + // ID3v2 (aka ID3v2.2) had a 3-byte unencoded length field. + if (v3major < 3) { + return (frame[offset] << 16) + (frame[offset+1] << 8) + frame[offset+2]; + } + int rawlen = b2be32(frame, offset); + // Encoders prior ID3v2.4 did not encode the frame length + if (v3major < 4) { + return rawlen; + } + return unsyncsafe(rawlen); + } + /* Parses all ID3v2 frames at the current position up until payload_len ** bytes were read */ - public HashMap parse_v3_frames(RandomAccessFile s, long payload_len) throws IOException { + public HashMap parse_v3_frames(RandomAccessFile s, long payload_len, int v3major) throws IOException { HashMap tags = new HashMap(); - byte[] frame = new byte[10]; // a frame header is always 10 bytes - long bread = 0; // total amount of read bytes - + // ID3v2 (aka ID3v2.2) had a 6-byte header of a 3-byte name and a 3-byte length. + // ID3v2.3 increased the header size to 10 bytes, with a 4-byte name and a 4-byte length + int namelen = (v3major >= 3 ? 4 : 3); + int headerlen = (v3major >= 3 ? 10 : 6); + byte[] frame = new byte[headerlen]; + long bread = 0; // total amount of read bytes + while(bread < payload_len) { bread += s.read(frame); - String framename = new String(frame, 0, 4); - int slen = b2be32(frame, 4); - + String framename = new String(frame, 0, namelen); + int slen = calculateFrameLength(frame, namelen, v3major); /* Abort on silly sizes */ - if(slen < 1 || slen > 524288) + long bytesRemaining = payload_len - bread; + if(slen < 1 || slen > bytesRemaining) break; - + byte[] xpl = new byte[slen]; bread += s.read(xpl); if(framename.substring(0,1).equals("T")) { - String[] nmzInfo = normalizeTaginfo(framename, xpl); - - for(int i = 0; i < nmzInfo.length; i += 2) { - String oggKey = nmzInfo[i]; - String decPld = nmzInfo[i + 1]; - - if (oggKey.length() > 0 && !tags.containsKey(oggKey)) { - addTagEntry(tags, oggKey, decPld); + TagItem nti = normalizeTaginfo(framename, xpl); + if (nti.key.length() > 0) { + for (TagItem ti : splitTagPayload(nti)) { + addTagEntry(tags, ti.key, ti.value); } } } else if(framename.equals("RVA2")) { // } - + } return tags; } - + + /* Split null-separated tags into individual elements */ + private ArrayList<TagItem> splitTagPayload(TagItem in) { + ArrayList res = new ArrayList<TagItem>(); + int i = 0; + + if (sOggNames.containsValue(in.key)) { + // Only try to split if there are more than two chars and the string does NOT look UTF16 encoded. + if (in.value.length() >= 2 && in.value.charAt(0) != 0 && in.value.charAt(1) != 0) { + for (String item : in.value.split("\0")) { + if (item.length() > 0) { // do not add empty items, avoids thrashing if the string is zero padded. 
+ res.add(new TagItem(in.key, item)); + } + i++; + } + } + } + + if (i == 0) { + res.add(in); + } + return res; + } + /* Converts ID3v2 sillyframes to OggNames */ - private String[] normalizeTaginfo(String k, byte[] v) { - String[] rv = new String[] {"",""}; - HashMap lu = new HashMap<String, String>(); - lu.put("TIT2", "TITLE"); - lu.put("TALB", "ALBUM"); - lu.put("TPE1", "ARTIST"); - - if(lu.containsKey(k)) { + private TagItem normalizeTaginfo(String k, byte[] v) { + TagItem ti = new TagItem("", ""); + if(sOggNames.containsKey(k)) { /* A normal, known key: translate into Ogg-Frame name */ - rv[0] = (String)lu.get(k); - rv[1] = getDecodedString(v); + ti.key = (String)sOggNames.get(k); + ti.value = getDecodedString(v); } else if(k.equals("TXXX")) { /* A freestyle field, ieks! */ String txData[] = getDecodedString(v).split(Character.toString('\0'), 2); /* Check if we got replaygain info in key\0value style */ - if(txData.length == 2) { - if(txData[0].matches("^(?i)REPLAYGAIN_(ALBUM|TRACK)_GAIN$")) { - rv[0] = txData[0].toUpperCase(); /* some tagwriters use lowercase for this */ - rv[1] = txData[1]; - } else { - // Check for replaygain tags just thrown randomly in field - int nextStartIndex = 1; - int startName = txData[1].toLowerCase(Locale.US).indexOf("replaygain_"); - ArrayList<String> parts = new ArrayList<String>(); - while(startName != -1) { - int endName = txData[1].indexOf((char) 0, startName); - if(endName != -1) { - parts.add(txData[1].substring(startName, endName).toUpperCase()); - int endValue = txData[1].indexOf((char) 0, endName + 1); - if(endValue != -1) { - parts.add(txData[1].substring(endName + 1, endValue)); - nextStartIndex = endValue + 1; - } else { - break; - } - } else { - break; - } - - startName = txData[1].toLowerCase(Locale.US).indexOf("replaygain_", nextStartIndex); - } - - if(parts.size() > 0) { - rv = new String[parts.size()]; - rv = parts.toArray(rv); - } - } + if(txData.length == 2 && txData[0].matches("^(?i)REPLAYGAIN_(ALBUM|TRACK)_GAIN$")) { + ti.key = txData[0].toUpperCase(); /* some tagwriters use lowercase for this */ + ti.value = txData[1]; } } - - return rv; + + return ti; } - + /* Converts a raw byte-stream text into a java String */ private String getDecodedString(byte[] raw) { int encid = raw[0] & 0xFF; - int len = raw.length; - String v = ""; + int skip = 1; + String cs = "ISO-8859-1"; + String rv = ""; try { - if(encid == ID3_ENC_LATIN) { - v = new String(raw, 1, len-1, "ISO-8859-1"); - } - else if (encid == ID3_ENC_UTF8) { - v = new String(raw, 1, len-1, "UTF-8"); - } - else if (encid == ID3_ENC_UTF16LE) { - v = new String(raw, 3, len-3, "UTF-16LE"); + switch (encid) { + case ID3_ENC_UTF8: + cs = "UTF-8"; + break; + case ID3_ENC_UTF16BE: + cs = "UTF-16BE"; + skip = 3; + break; + case ID3_ENC_UTF16: + cs = "UTF-16"; + if (raw.length > 4) { + if ((raw[1]&0xFF) == 0xFE && (raw[2]&0XFF) == 0xFF && (raw[3]&0xFF) == 0x00 && (raw[4]&0xFF) == 0x00) { + // buggy tag written by lame?! + raw[3] = raw[2]; + raw[4] = raw[1]; + skip = 3; + } else if((raw[1]&0xFF) == 0xFF && (raw[2]&0XFF) == 0x00 && (raw[3]&0xFF) == 0xFE) { + // ?!, but seen in the wild + raw[2] = raw[1]; + skip = 2; + } + } + break; + case ID3_ENC_LATIN: + default: + // uses defaults } - else if (encid == ID3_ENC_UTF16BE) { - v = new String(raw, 3, len-3, "UTF-16BE"); + + rv = new String(raw, skip, raw.length-skip, cs); + + if (rv.length() > 0 && rv.substring(rv.length()-1).equals("\0")) { + // SOME tag writers seem to null terminate strings, some don't... 
+ rv = rv.substring(0, rv.length()-1); } } catch(Exception e) {} - return v; + return rv; } - + } diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/LameHeader.java b/app/src/main/java/github/daneren2005/dsub/util/tags/LameHeader.java index 720ee87f..691c7279 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/LameHeader.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/LameHeader.java @@ -1,5 +1,6 @@ /* * Copyright (C) 2013 Adrian Ulrich <adrian@blinkenlights.ch> + * Copyright (C) 2017 Google Inc. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,28 +20,122 @@ package github.daneren2005.dsub.util.tags; import java.io.IOException; import java.io.RandomAccessFile; +import java.util.Arrays; import java.util.HashMap; import java.util.Enumeration; public class LameHeader extends Common { - + + // Sampling rate version -> field mapping + private static int[][] sampleRates = { + { 11025, 12000, 8000 }, // MPEG2.5 (idx = 0) + { 0, 0, 0 }, // reserved (idx = 1) + { 22050, 24000, 16000 }, // MPEG2 (idx = 2) + { 44100, 48000, 32000 }, // MPEG1 (idx = 3) + }; + + // SamplesPerFrame layer -> version mapping + private static int[][] samplesPerFrame = { + // reserved, layer3, layer2, layer1 + { 0, 576, 1152, 384 }, // MPEG2.5 + { 0, 0, 0, 0 }, // RESERVED + { 0, 576, 1152, 384 }, // MPEG2 + { 0, 1152, 1152, 384 }, // MPEG1 + }; + + public LameHeader() { } public HashMap getTags(RandomAccessFile s) throws IOException { - return parseLameHeader(s, 0); + HashMap rgain = parseLameHeader(s, 0); + HashMap tags = parseV1Header(s, s.length()-128); + + // Add replay gain info to returned object if available + for (String k : Arrays.asList("REPLAYGAIN_TRACK_GAIN", "REPLAYGAIN_ALBUM_GAIN")) { + if (rgain.containsKey(k)) + tags.put(k, rgain.get(k)); + } + + return tags; } - + + /** + * Attempts to parse ID3v1(.1) information from given RandomAccessFile + * + * @param s the seekable RandomAccessFile + * @param offset position of the ID3v1 tag + */ + private HashMap parseV1Header(RandomAccessFile s, long offset) throws IOException { + HashMap tags = new HashMap(); + byte[] tag = new byte[3]; + byte[] year = new byte[4]; + byte[] str = new byte[30]; + + s.seek(offset); + s.read(tag); + + if("TAG".equals(new String(tag))) { + for (String name : Arrays.asList("TITLE", "ARTIST", "ALBUM")) { + s.read(str); + String value = new String(str, "ISO-8859-1").trim(); + if (value.length() > 0) + addTagEntry(tags, name, value); + } + + // year is a string for whatever reason... 
+ s.read(year); + String y = new String(year).trim(); + if (y.length() > 0) + addTagEntry(tags, "YEAR", y); + + s.skipBytes(28); // skip comment field + s.read(tag); + + if (tag[0] == 0 && tag[1] != 0) // tag[0] == 0 -> is id3v1.1 compatible + addTagEntry(tags, "TRACKNUMBER", String.format("%d", tag[1])); + + if (tag[2] != 0) + addTagEntry(tags, "GENRE", String.format("%d", tag[2])); + } + + + return tags; + } + public HashMap parseLameHeader(RandomAccessFile s, long offset) throws IOException { HashMap tags = new HashMap(); - byte[] chunk = new byte[4]; + byte[] chunk = new byte[12]; s.seek(offset + 0x24); s.read(chunk); - String lameMark = new String(chunk, 0, chunk.length, "ISO-8859-1"); - + String lameMark = new String(chunk, 0, 4, "ISO-8859-1"); + int flags = b2u(chunk[7]); + + if((flags & 0x01) !=0 ) { // header indicates that totalFrames field is present + int total_frames = b2be32(chunk, 8); + s.seek(offset); + s.read(chunk); + + int mpeg_hdr = b2be32(chunk, 0); + int srate_idx = (mpeg_hdr >> 10) & 3; // sampling rate index at bit 10-11 + int layer_idx = (mpeg_hdr >> 17) & 3; // layer index value bit 17-18 + int ver_idx = (mpeg_hdr >> 19) & 3; // version index value bit 19-20 + + // Try to calculate song duration if all indexes are sane + if (ver_idx < sampleRates.length && srate_idx < sampleRates[ver_idx].length && layer_idx < samplesPerFrame[ver_idx].length) { + int sample_rate = sampleRates[ver_idx][srate_idx]; + int sample_pfr = samplesPerFrame[ver_idx][layer_idx]; + if (sample_rate > 0 && sample_pfr > 0) { + double duration = ((double)sample_pfr / (double)sample_rate) * total_frames; + tags.put("duration", (int)duration); + } + } + + } + if(lameMark.equals("Info") || lameMark.equals("Xing")) { s.seek(offset+0xAB); s.read(chunk); diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/Mp4File.java b/app/src/main/java/github/daneren2005/dsub/util/tags/Mp4File.java new file mode 100644 index 00000000..db00bf6a --- /dev/null +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/Mp4File.java @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2016 Ian Harmon + * Copyright (C) 2017 Google Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +package github.daneren2005.dsub.util.tags; + +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Stack; + +/* +* Helper class for tracking the traversal of the atom tree +*/ +class Atom { + String name; + long start; + int length; + + public Atom(String name, long start, int length) { + this.name = name; + this.start = start; + this.length = length; + } +} + +/* +* MP4 tag parser +*/ +public class Mp4File extends Common { + + // only these tags are returned. others may be parsed, but discarded. 
+ final static List<String> ALLOWED_TAGS = Arrays.asList( + "replaygain_track_gain", + "replaygain_album_gain", + "title", + "album", + "artist", + "albumartist", + "composer", + "genre", + "year", + "tracknumber", + "discnumber" + ); + + // mapping between atom <-> vorbis tags + final static HashMap<String, String> ATOM_TAGS; + static { + ATOM_TAGS = new HashMap<String, String>(); + ATOM_TAGS.put("�nam", "title"); + ATOM_TAGS.put("�alb", "album"); + ATOM_TAGS.put("�ART", "artist"); + ATOM_TAGS.put("aART", "albumartist"); + ATOM_TAGS.put("�wrt", "composer"); + ATOM_TAGS.put("�gen", "genre"); + ATOM_TAGS.put("�day", "year"); + ATOM_TAGS.put("trkn", "tracknumber"); + ATOM_TAGS.put("disk", "discnumber"); + } + + // These tags are 32bit integers, not strings. + final static List<String> BINARY_TAGS = Arrays.asList( + "tracknumber", + "discnumber" + ); + + // maximum size for tag names or values + final static int MAX_BUFFER_SIZE = 512; + // only used when developing + final static boolean PRINT_DEBUG = false; + + // When processing atoms, we first read the atom length (4 bytes), + // and then the atom name (also 4 bytes). This value should not be changed. + final static int ATOM_HEADER_SIZE = 8; + + /* + * Traverses the atom structure of an MP4 file and returns as soon as tags + * are parsed + */ + public HashMap getTags(RandomAccessFile s) throws IOException { + HashMap tags = new HashMap(); + if (PRINT_DEBUG) { System.out.println(); } + try { + + // maintain a trail of breadcrumbs to know what part of the file we're in, + // so e.g. that we only parse [name] atoms that are part of a tag + Stack<Atom> path = new Stack<Atom>(); + + s.seek(0); + int atomSize; + byte[] atomNameRaw = new byte[4]; + String atomName; + String tagName = null; + + // begin traversing the file + // file structure info from http://atomicparsley.sourceforge.net/mpeg-4files.html + while (s.getFilePointer() < s.length()) { + + // if we've read/skipped past the end of atoms, remove them from the path stack + while (!path.empty() && s.getFilePointer() >= (path.peek().start + path.peek().length)) { + // if we've finished the tag atom [ilst], we can stop parsing. + // when tags are read successfully, this should be the exit point for the parser. + if (path.peek().name.equals("ilst")) { + if (PRINT_DEBUG) { System.out.println(); } + return tags; + } + path.pop(); + } + + // read a new atom's details + atomSize = s.readInt(); + + // return if we're unable to parse an atom size + // (e.g. previous atoms were parsed incorrectly and the + // file pointer is misaligned) + if (atomSize <= 0) { return tags; } + + s.read(atomNameRaw); + atomName = new String(atomNameRaw); + + // determine if we're currently decending through the hierarchy + // to a tag atom + boolean approachingTagAtom = false; + boolean onMetaAtom = false; + boolean onTagAtom = false; + String fourAtom = null; + // compare everything in the current path hierarchy and the new atom as well + // this is a bit repetitive as-is, but shouldn't be noticeable + for (int i = 0; i <= path.size(); i++) { + String thisAtomName = (i < path.size()) ? 
path.get(i).name : atomName; + if ((i == 0 && thisAtomName.equals("moov")) || + (i == 1 && thisAtomName.equals("udta")) || + (i == 2 && thisAtomName.equals("meta")) || + (i == 3 && thisAtomName.equals("ilst")) || + (i == 4 && thisAtomName.equals("----")) || + (i == 4 && ATOM_TAGS.containsKey(thisAtomName)) || + (i == 5 && (thisAtomName.equals("name") || thisAtomName.equals("data"))) + ) { + approachingTagAtom = true; + // if we're at the end of the current hierarchy, mark if it's the [meta] or a tag atom. + if (i == path.size()) { + onMetaAtom = thisAtomName.equals("meta"); + onTagAtom = (thisAtomName.equals("name") || thisAtomName.equals("data")); + } + // depth is 4 and this is a known atom: rembemer this! + if (i == 4 && ATOM_TAGS.containsKey(thisAtomName)) { + fourAtom = ATOM_TAGS.get(thisAtomName); + } + } + // quit as soon as we know we're not on the road to a tag atom + else { + approachingTagAtom = false; + break; + } + } + + // add the new atom to the path hierarchy + path.push(new Atom(atomName, s.getFilePointer()-ATOM_HEADER_SIZE, atomSize)); + if (PRINT_DEBUG) { printDebugAtomPath(s, path, atomName, atomSize); } + + // skip all non-pertinent atoms + if (!approachingTagAtom) { s.skipBytes(atomSize-ATOM_HEADER_SIZE); } + // dive into tag-related ones + else { + // the meta atom has an extra 4 bytes that need to be skipped + if (onMetaAtom) { s.skipBytes(4); } + + // read tag contents when there + if (onTagAtom) { + // get a tag name + if (atomName.equals("name")) { + // skip null bytes + s.skipBytes(4); + tagName = new String(readIntoBuffer(s, atomSize-(ATOM_HEADER_SIZE+4))); + } + + // get a tag value + else if (atomName.equals("data")) { + // skip flags/null bytes + s.skipBytes(8); + + // use the 'fourAtom' value if we did not have a tag name + tagName = (tagName == null ? fourAtom : tagName); + // read the tag + byte[] tagBuffer = readIntoBuffer(s, atomSize-(ATOM_HEADER_SIZE+8)); + + if (ALLOWED_TAGS.contains(tagName)) + { + String tagValue = (BINARY_TAGS.contains(tagName) ? String.format("%d", b2be32(tagBuffer, 0)) : new String(tagBuffer, "UTF-8")); + if (PRINT_DEBUG) { + System.out.println(String.format("parsed tag '%s': '%s'\n", tagName, tagValue)); + } + addTagEntry(tags, tagName.toUpperCase(), tagValue); + } + // This is the end of this tree, make sure that we don't re-use tagName in any other tree + tagName = null; + } + } + } + } + // End of while loop, the file has been completely read through. + // The parser should only return here if the tags atom [ilst] was missing. + return tags; + } + // if anything goes wrong, just return whatever we already have + catch (Exception e) { + return tags; + } + } + + /* + * Reads bytes from an atom up to the buffer size limit, currently 512B + */ + private byte[] readIntoBuffer(RandomAccessFile s, int dataSize) throws IOException { + // read tag up to buffer limit + int bufferSize = Math.min(dataSize, MAX_BUFFER_SIZE); + byte[] buffer = new byte[bufferSize]; + s.read(buffer, 0, buffer.length); + if (dataSize > bufferSize) { + s.skipBytes(dataSize - bufferSize); + } + return buffer; + } + + /* + * Can be used when traversing the atom hierarchy to print the tree of atoms + */ + private void printDebugAtomPath(RandomAccessFile s, Stack<Atom> path, + String atomName, int atomSize) throws IOException + { + String treeLines = ""; + for (int i = 0; i < path.size(); i++) { treeLines += ". 
"; } + long atomStart = s.getFilePointer()-ATOM_HEADER_SIZE; + System.out.println(String.format("%-22s %8d to %8d, length %8d", + (treeLines + "[" + atomName + "]"), atomStart, (atomStart+atomSize), atomSize)); + } +} diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/OggFile.java b/app/src/main/java/github/daneren2005/dsub/util/tags/OggFile.java index d0b31671..0383c68a 100644 --- a/app/src/main/java/github/daneren2005/dsub/util/tags/OggFile.java +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/OggFile.java @@ -23,10 +23,11 @@ import java.io.RandomAccessFile; import java.util.HashMap; -public class OggFile extends Common { +public class OggFile extends Common implements PageInfo.PageParser { - private static final int OGG_PAGE_SIZE = 27; // Static size of an OGG Page - private static final int OGG_TYPE_COMMENT = 3; // ID of 'VorbisComment's + private static final int OGG_PAGE_SIZE = 27; // Static size of an OGG Page + private static final int OGG_TYPE_IDENTIFICATION = 1; // Identification header + private static final int OGG_TYPE_COMMENT = 3; // ID of 'VorbisComment's public OggFile() { } @@ -34,24 +35,46 @@ public class OggFile extends Common { public HashMap getTags(RandomAccessFile s) throws IOException { long offset = 0; int retry = 64; + boolean need_tags = true; + boolean need_id = true; + HashMap tags = new HashMap(); + HashMap identification = new HashMap(); for( ; retry > 0 ; retry-- ) { - long res[] = parse_ogg_page(s, offset); - if(res[2] == OGG_TYPE_COMMENT) { - tags = parse_ogg_vorbis_comment(s, offset+res[0], res[1]); + PageInfo pi = parse_stream_page(s, offset); + if(pi.type == OGG_TYPE_IDENTIFICATION) { + identification = parse_ogg_vorbis_identification(s, offset+pi.header_len, pi.payload_len); + need_id = false; + } else if(pi.type == OGG_TYPE_COMMENT) { + tags = parse_ogg_vorbis_comment(s, offset+pi.header_len, pi.payload_len); + need_tags = false; + } + offset += pi.header_len + pi.payload_len; + if (need_tags == false && need_id == false) { break; } - offset += res[0] + res[1]; } + + // Calculate duration in seconds + // Note that this calculation is WRONG: We would have to get the last + // packet to calculate the real length - but this is goot enough. 
+ if (identification.containsKey("bitrate_nominal")) { + int br_nom = (Integer)identification.get("bitrate_nominal") / 8; + long file_length = s.length(); + if (file_length > 0 && br_nom > 0) { + tags.put("duration", (int)(file_length/br_nom)); + } + } + return tags; } - /* Parses the ogg page at offset 'offset' and returns - ** [header_size, payload_size, type] - */ - private long[] parse_ogg_page(RandomAccessFile s, long offset) throws IOException { + /** + * Parses the ogg page at offset 'offset' + */ + public PageInfo parse_stream_page(RandomAccessFile s, long offset) throws IOException { long[] result = new long[3]; // [header_size, payload_size] byte[] p_header = new byte[OGG_PAGE_SIZE]; // buffer for the page header byte[] scratch; @@ -78,18 +101,18 @@ public class OggFile extends Common { psize += b2u(scratch[i]); } } - - // populate result array - result[0] = (s.getFilePointer() - offset); - result[1] = psize; - result[2] = -1; - + + PageInfo pi = new PageInfo(); + pi.header_len = (s.getFilePointer() - offset); + pi.payload_len = psize; + pi.type = -1; + /* next byte is most likely the type -> pre-read */ if(psize >= 1 && s.read(p_header, 0, 1) == 1) { - result[2] = b2u(p_header[0]); + pi.type = b2u(p_header[0]); } - - return result; + + return pi; } /* In 'vorbiscomment' field is prefixed with \3vorbis in OGG files @@ -108,7 +131,37 @@ public class OggFile extends Common { if( (new String(pfx, 0, pfx_len)).equals("\3vorbis") == false ) xdie("Damaged packet found!"); - return parse_vorbis_comment(s, offset+pfx_len, pl_len-pfx_len); + return parse_vorbis_comment(s, this, offset+pfx_len, pl_len-pfx_len); } - + + /* + ** Returns a hashma with parsed vorbis identification header data + **/ + private HashMap parse_ogg_vorbis_identification(RandomAccessFile s, long offset, long pl_len) throws IOException { + /* Structure: + * 7 bytes of \1vorbis + * 4 bytes version + * 1 byte channels + * 4 bytes sampling rate + * 4 bytes bitrate max + * 4 bytes bitrate nominal + * 4 bytes bitrate min + **/ + HashMap id_hash = new HashMap(); + byte[] buff = new byte[28]; + + if(pl_len >= buff.length) { + s.seek(offset); + s.read(buff); + id_hash.put("version" , b2le32(buff, 7)); + id_hash.put("channels" , b2u(buff[11])); + id_hash.put("sampling_rate" , b2le32(buff, 12)); + id_hash.put("bitrate_minimal" , b2le32(buff, 16)); + id_hash.put("bitrate_nominal" , b2le32(buff, 20)); + id_hash.put("bitrate_maximal" , b2le32(buff, 24)); + } + + return id_hash; + } + }; diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/OpusFile.java b/app/src/main/java/github/daneren2005/dsub/util/tags/OpusFile.java new file mode 100644 index 00000000..38b402bb --- /dev/null +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/OpusFile.java @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2015 Adrian Ulrich <adrian@blinkenlights.ch> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +package github.daneren2005.dsub.util.tags; + + +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.HashMap; + + +public class OpusFile extends OggFile { + // A list of tags we are going to ignore in the OpusTags section + public static final String[] FORBIDDEN_TAGS = {"REPLAYGAIN_TRACK_GAIN", "REPLAYGAIN_TRACK_PEAK", "REPLAYGAIN_ALBUM_GAIN", "REPLAYGAIN_ALBUM_PEAK"}; + + public OpusFile() { + } + + public HashMap getTags(RandomAccessFile s) throws IOException { + + // The opus specification is very strict: The first packet MUST + // contain the OpusHeader while the 2nd MUST contain the + // OggHeader payload: https://wiki.xiph.org/OggOpus + long pos = 0; + PageInfo pi = parse_stream_page(s, pos); + + HashMap tags = new HashMap(); + HashMap opus_head = parse_opus_head(s, pos+pi.header_len, pi.payload_len); + pos += pi.header_len+pi.payload_len; + + // Check if we parsed a version number and ensure it doesn't have any + // of the upper 4 bits set (eg: <= 15) + if(opus_head.containsKey("version") && (Integer)opus_head.get("version") <= 0xF) { + // Get next page: The spec requires this to be an OpusTags head + pi = parse_stream_page(s, pos); + tags = parse_opus_vorbis_comment(s, pos+pi.header_len, pi.payload_len); + // ...and merge replay gain intos into the tags map + calculate_gain(opus_head, tags); + } + + return tags; + } + + /** + * Adds replay gain information to the tags hash map + */ + private void calculate_gain(HashMap header, HashMap tags) { + // Remove any unacceptable tags (Opus files must not have + // their own REPLAYGAIN_* fields) + for(String k : FORBIDDEN_TAGS) { + tags.remove(k); + } + // Include the gain value found in the opus header + int header_gain = (Integer)header.get("header_gain"); + addTagEntry(tags, "R128_BASTP_BASE_GAIN", ""+header_gain); + } + + + /** + * Attempts to parse an OpusHead block at given offset. 
+ * Returns an hash-map, will be empty on failure + */ + private HashMap parse_opus_head(RandomAccessFile s, long offset, long pl_len) throws IOException { + /* Structure: + * 8 bytes of 'OpusHead' + * 1 byte version + * 1 byte channel count + * 2 bytes pre skip + * 4 bytes input-sample-rate + * 2 bytes outputGain as Q7.8 + * 1 byte channel map + * --> 19 bytes + */ + + HashMap id_hash = new HashMap(); + byte[] buff = new byte[19]; + if(pl_len >= buff.length) { + s.seek(offset); + s.read(buff); + if((new String(buff, 0, 8)).equals("OpusHead")) { + id_hash.put("version" , b2u(buff[8])); + id_hash.put("channels" , b2u(buff[9])); + id_hash.put("pre_skip" , b2le16(buff, 10)); + id_hash.put("sampling_rate", b2le32(buff, 12)); + id_hash.put("header_gain" , (int)((short)b2le16(buff, 16))); + id_hash.put("channel_map" , b2u(buff[18])); + } + } + + return id_hash; + } + + /** + * Parses an OpusTags section + * Returns a hash map of the found tags + */ + private HashMap parse_opus_vorbis_comment(RandomAccessFile s, long offset, long pl_len) throws IOException { + final int magic_len = 8; // OpusTags + byte[] magic = new byte[magic_len]; + + if(pl_len < magic_len) + xdie("opus comment field is too short!"); + + // Read and check magic signature + s.seek(offset); + s.read(magic); + + if((new String(magic, 0, magic_len)).equals("OpusTags") == false) + xdie("Damaged packet found!"); + + return parse_vorbis_comment(s, this, offset+magic_len, pl_len-magic_len); + } + +} diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/PageInfo.java b/app/src/main/java/github/daneren2005/dsub/util/tags/PageInfo.java new file mode 100644 index 00000000..1081e355 --- /dev/null +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/PageInfo.java @@ -0,0 +1,16 @@ +package github.daneren2005.dsub.util.tags; + +import java.io.IOException; +import java.io.RandomAccessFile; + +public class PageInfo { + + long header_len; + long payload_len; + int type; + boolean last_page; + + public static interface PageParser { + PageInfo parse_stream_page(RandomAccessFile fh, long offset) throws IOException; + } +} diff --git a/app/src/main/java/github/daneren2005/dsub/util/tags/RawFile.java b/app/src/main/java/github/daneren2005/dsub/util/tags/RawFile.java new file mode 100644 index 00000000..2fa6cf30 --- /dev/null +++ b/app/src/main/java/github/daneren2005/dsub/util/tags/RawFile.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2017 Google Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +package github.daneren2005.dsub.util.tags; + +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.HashMap; + +public class RawFile extends Common { + + /** + * Returns the tags of a Raw File which is just an empty HashMap. + * This shall be used for raw streams with no (supported) tags. 
+ */ + public HashMap getTags(RandomAccessFile s) throws IOException { + HashMap tags = new HashMap(); + return tags; + } +} + |
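End-of-diff note on the new R128 handling in BastpUtil: R128_TRACK_GAIN and R128_ALBUM_GAIN are stored as Q7.8 fixed-point values, so the patch divides the raw tag value by 256 to get dB and then adds a flat 5 dB (presumably to bring the quieter R128 reference level in line with the ReplayGain target). R128_BASTP_BASE_GAIN carries the OpusHead output gain, which the Android media framework already applies during decoding, so it only sets the found flag and is not added to the adjustment again. A worked example under those assumptions, with made-up tag values:

/** Worked example of the R128 math added in BastpUtil; the tag values are made up. */
class R128GainExample {
    public static void main(String[] args) {
        int rawTrackGain = -1536;              // e.g. a tag "R128_TRACK_GAIN=-1536"
        float trackDb = rawTrackGain / 256.0f; // Q7.8 fixed point -> -6.0 dB
        trackDb += 5.0f;                       // flat offset the patch applies to R128 values
        System.out.println("effective track adjustment: " + trackDb + " dB"); // -1.0 dB

        // R128_BASTP_BASE_GAIN (the OpusHead output gain) is handled by the platform
        // decoder itself, so BastpUtil only uses it to set the 'found' flag and does
        // not fold it into trackDb.
    }
}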
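The ID3v2File rewrite also factors size decoding into an unsyncsafe() helper: the tag header length (and, for ID3v2.4, frame lengths) are "syncsafe" integers in which every byte carries only 7 payload bits, so no byte of the size field can have its high bit set and be mistaken for an MPEG sync pattern. A small self-contained sketch of the same decoding:

/** Sketch of ID3v2 syncsafe size decoding, mirroring the unsyncsafe() helper in the patch. */
class SyncsafeExample {
    // Each of the four big-endian bytes contributes 7 bits to the result.
    static int unsyncsafe(int x) {
        return ((x & 0x7f000000) >> 3)
             | ((x & 0x007f0000) >> 2)
             | ((x & 0x00007f00) >> 1)
             | ((x & 0x0000007f));
    }

    public static void main(String[] args) {
        // Bytes 00 00 02 01 encode (2 << 7) + 1 = 257 bytes of tag data.
        System.out.println(unsyncsafe(0x00000201)); // prints 257
    }
}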