base58: add Decode32
This commit is contained in:
parent
dd4f5d4d6c
commit
85ff933995
|
@ -8,11 +8,31 @@
|
|||
// Original author: Philip Taffet <phtaffet@jumptrading.com>
|
||||
package base58
|
||||
|
||||
import "encoding/binary"
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// alphabet maps [0, 58) to the base58 character.
|
||||
const alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
|
||||
|
||||
// inverseLUT maps (character value - '1') to its base58 digit value in
// [0, 58). Entries of 0xFF mark ASCII characters inside ['1', 'z'] that
// are not part of the base58 alphabet ('I', 'O', 'l', and the
// punctuation runs between letter/digit groups). The final slot
// (index 74, the sentinel) is 0xFF so that any out-of-range input byte
// can be clamped onto it during validation.
var inverseLUT = [75]byte{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // '1'..'8'
	0x08, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, // '9', then ':'..'@' invalid
	0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, // 'A'..'H'
	0xFF, 0x11, 0x12, 0x13, 0x14, 0x15, 0xFF, 0x16, // 'I' invalid, 'J'..'N', 'O' invalid, 'P'
	0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, // 'Q'..'X'
	0x1F, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, // 'Y', 'Z', then '['..'`' invalid
	0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, // 'a'..'h'
	0x29, 0x2A, 0x2B, 0xFF, 0x2C, 0x2D, 0x2E, 0x2F, // 'i'..'k', 'l' invalid, 'm'..'p'
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, // 'q'..'x'
	0x38, 0x39, 0xFF, // 'y', 'z', sentinel (always invalid)
}
|
||||
|
||||
// invalidChar is the inverseLUT entry that marks a byte which is not a
// valid base58 character.
const invalidChar uint8 = 0xFF

// inverseLUTOffset is the smallest valid base58 character ('1'); it is
// subtracted from an input byte before indexing inverseLUT.
const inverseLUTOffset uint8 = '1'

// inverseLUTSentinel is the index of the last slot of inverseLUT, which
// is always invalid; out-of-range indices are clamped onto it.
const inverseLUTSentinel uint8 = 1 + 'z' - inverseLUTOffset
|
||||
|
||||
// encTable32 contains the unique values less than 58^5 such that:
|
||||
//
|
||||
// 2^(32*(7-j)) = sum_k table[j][k]*58^(5*(7-k))
|
||||
|
@ -30,6 +50,21 @@ var encTable32 = [9][8]uint32{
|
|||
{0, 0, 0, 0, 0, 0, 0, 1},
|
||||
}
|
||||
|
||||
// decTable32 contains the unique values less than 2^32 such that:
//
//	58^(5*(8-j)) = sum_k table[j][k]*2^(32*(7-k))
//
// That is, row j is the big-endian base-2^32 expansion of 58^(5*(8-j)),
// used to convert the base-58^5 intermediate representation into
// 32-bit limbs.
var decTable32 = [9][8]uint32{
	{1277, 2650397687, 3801011509, 2074386530, 3248244966, 687255411, 2959155456, 0},
	{0, 8360, 1184754854, 3047609191, 3418394749, 132556120, 1199103528, 0},
	{0, 0, 54706, 2996985344, 1834629191, 3964963911, 485140318, 1073741824},
	{0, 0, 0, 357981, 1476998812, 3337178590, 1483338760, 4194304000},
	{0, 0, 0, 0, 2342503, 3052466824, 2595180627, 17825792},
	{0, 0, 0, 0, 0, 15328518, 1933902296, 4063920128},
	{0, 0, 0, 0, 0, 0, 100304420, 3355157504},
	{0, 0, 0, 0, 0, 0, 0, 656356768},
	{0, 0, 0, 0, 0, 0, 0, 1},
}
|
||||
|
||||
func Encode32(out *[44]byte, in [32]byte) uint {
|
||||
const raw58sz = 45
|
||||
|
||||
|
@ -142,3 +177,96 @@ func Encode32(out *[44]byte, in [32]byte) uint {
|
|||
|
||||
return raw58sz - skip
|
||||
}
|
||||
|
||||
func Decode32(out *[32]byte, encoded []byte) (ok bool) {
|
||||
// Check length
|
||||
if len(encoded) < 32 || len(encoded) > 44 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Validate string
|
||||
for _, c := range encoded {
|
||||
idx := int(c) - int(inverseLUTOffset)
|
||||
if idx > int(inverseLUTSentinel) {
|
||||
idx = int(inverseLUTSentinel)
|
||||
}
|
||||
if inverseLUT[idx] == invalidChar {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// X = sum_i raw_base58[i] * 58^(RAW58_SZ-1-i)
|
||||
var rawBase58 [45]byte
|
||||
|
||||
// Prepend enough 0s to make it exactly RAW58_SZ characters
|
||||
prepend0 := 45 - len(encoded)
|
||||
for j := 0; j < 45; j++ {
|
||||
if j >= int(prepend0) {
|
||||
rawBase58[j] = inverseLUT[encoded[j-int(prepend0)]-inverseLUTOffset]
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to the intermediate format
|
||||
// X = sum_i intermediate[i] * 58^(5*(INTERMEDIATE_SZ-1-i))
|
||||
var intermediate [9]uint64
|
||||
for i := 0; i < 9; i++ {
|
||||
intermediate[i] = uint64(rawBase58[5*i+0])*11316496 +
|
||||
uint64(rawBase58[5*i+1])*195112 +
|
||||
uint64(rawBase58[5*i+2])*3364 +
|
||||
uint64(rawBase58[5*i+3])*58 +
|
||||
uint64(rawBase58[5*i+4])
|
||||
}
|
||||
|
||||
// Using the table, convert to overcomplete base 2^32 (terms can be
|
||||
// larger than 2^32). We need to be careful about overflow.
|
||||
//
|
||||
// For N==32, the largest anything in binary can get is binary[7]:
|
||||
// even if intermediate[i]==58^5-1 for all i, then binary[7] < 2^63.
|
||||
var binary_ [8]uint64
|
||||
for j := 0; j < 8; j++ {
|
||||
var acc uint64
|
||||
for i := 0; i < 9; i++ {
|
||||
acc += uint64(intermediate[i]) * uint64(decTable32[i][j])
|
||||
}
|
||||
binary_[j] = acc
|
||||
}
|
||||
|
||||
// Make sure each term is less than 2^32.
|
||||
//
|
||||
// For N==32, we have plenty of headroom in binary, so overflow is
|
||||
// not a concern this time.
|
||||
for i := 7; i > 0; i-- {
|
||||
binary_[i-1] += binary_[i] >> 32
|
||||
binary_[i] &= 0xFFFFFFFF
|
||||
}
|
||||
|
||||
// If the largest term is 2^32 or bigger, it means N is larger than
|
||||
// what can fit in BYTE_CNT bytes. This can be triggered, by passing
|
||||
// a base58 string of all 'z's for example.
|
||||
if binary_[0] > 0xFFFFFFFF {
|
||||
return false
|
||||
}
|
||||
|
||||
// Convert each term to big endian for the final output
|
||||
for i := 0; i < 8; i++ {
|
||||
binary.BigEndian.PutUint32(out[4*i:], uint32(binary_[i]))
|
||||
}
|
||||
|
||||
// Make sure the encoded version has the same number of leading '1's
|
||||
// as the decoded version has leading 0s. The check doesn't read past
|
||||
// the end of encoded, because '\0' != '1', so it will return NULL.
|
||||
var leadingZeroCnt int
|
||||
for leadingZeroCnt = 0; leadingZeroCnt < 32; leadingZeroCnt++ {
|
||||
if out[leadingZeroCnt] != 0 {
|
||||
break
|
||||
}
|
||||
if encoded[leadingZeroCnt] != '1' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if leadingZeroCnt < len(encoded) && encoded[leadingZeroCnt] == '1' {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -45,3 +45,18 @@ func TestEncode32(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecode32(t *testing.T) {
|
||||
for _, test := range testVector32 {
|
||||
var out [32]byte
|
||||
if !Decode32(&out, []byte(test.b58)) {
|
||||
t.Errorf("Decode32(%s) failed", test.b58)
|
||||
continue
|
||||
}
|
||||
|
||||
outStr := hex.EncodeToString(out[:])
|
||||
if outStr != test.hex {
|
||||
t.Errorf("Decode32(%s) = %s, want %s", test.b58, outStr, test.hex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue