Start converting hasher functions to methods.
andybalholm committed Mar 8, 2019
1 parent 6a14da6 commit bbbdedf
Showing 17 changed files with 212 additions and 332 deletions.
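For context, here is a sketch of where this refactoring appears to be heading (my assumption; the commit message only says the conversion is starting). Once every hasher type carries these methods, the HasherHandle interface can declare them directly and the per-type dispatch wrappers (InitializeH2, PrepareH3, StitchToPreviousBlockH40, and so on) become unnecessary. The three method signatures are copied from the diffs below; the interface declaration itself is hypothetical.

// Hypothetical end state, assuming HasherHandle grows the converted methods.
type HasherHandle interface {
	Common() *HasherCommon
	Initialize(params *BrotliEncoderParams)
	Prepare(one_shot bool, input_size uint, data []byte)
	StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint)
}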
encode.go: 2 changes (1 addition, 1 deletion)
@@ -1598,7 +1598,7 @@ func BrotliCompressBufferQuality10(lgwin int, input_size uint, input_buffer []by
var path_size uint
var new_cmd_alloc_size uint
BrotliInitZopfliNodes(nodes, block_size+1)
-StitchToPreviousBlockH10(hasher, block_size, block_start, input_buffer, mask)
+hasher.StitchToPreviousBlock(block_size, block_start, input_buffer, mask)
path_size = BrotliZopfliComputeShortestPath(block_size, block_start, input_buffer, mask, &params, dist_cache[:], hasher, nodes)

/* We allocate a command buffer in the first iteration of this loop that
h10.go: 23 changes (10 additions, 13 deletions)
@@ -45,20 +45,18 @@ func ForestH10(self *H10) []uint32 {
return []uint32(self.forest)
}

-func InitializeH10(handle HasherHandle, params *BrotliEncoderParams) {
-var self *H10 = SelfH10(handle)
-self.window_mask_ = (1 << params.lgwin) - 1
-self.invalid_pos_ = uint32(0 - self.window_mask_)
+func (h *H10) Initialize(params *BrotliEncoderParams) {
+h.window_mask_ = (1 << params.lgwin) - 1
+h.invalid_pos_ = uint32(0 - h.window_mask_)
var num_nodes uint = uint(1) << params.lgwin
-self.forest = make([]uint32, 2*num_nodes)
+h.forest = make([]uint32, 2*num_nodes)
}

-func PrepareH10(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-var self *H10 = SelfH10(handle)
-var invalid_pos uint32 = self.invalid_pos_
+func (h *H10) Prepare(one_shot bool, input_size uint, data []byte) {
+var invalid_pos uint32 = h.invalid_pos_
var i uint32
for i = 0; i < 1<<17; i++ {
-self.buckets_[i] = invalid_pos
+h.buckets_[i] = invalid_pos
}
}

@@ -261,8 +259,7 @@ func StoreRangeH10(handle HasherHandle, data []byte, mask uint, ix_start uint, i
}
}

-func StitchToPreviousBlockH10(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
-var self *H10 = SelfH10(handle)
+func (h *H10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= HashTypeLengthH10()-1 && position >= 128 {
var i_start uint = position - 128 + 1
var i_end uint = brotli_min_size_t(position, i_start+num_bytes)
@@ -276,12 +273,12 @@ func StitchToPreviousBlockH10(handle HasherHandle, num_bytes uint, position uint
Furthermore, we have to make sure that we don't look further back
from the start of the next block than the window size, otherwise we
could access already overwritten areas of the ring-buffer. */
-var max_backward uint = self.window_mask_ - brotli_max_size_t(BROTLI_WINDOW_GAP-1, position-i)
+var max_backward uint = h.window_mask_ - brotli_max_size_t(BROTLI_WINDOW_GAP-1, position-i)

/* We know that i + 128 <= position + num_bytes, i.e. the
end of the current block and that we have at least
128 tail in the ring-buffer. */
-StoreAndFindMatchesH10(self, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
+StoreAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
}
}
}
h2.go: 19 changes (9 additions, 10 deletions)
@@ -43,27 +43,26 @@ func SelfH2(handle HasherHandle) *H2 {
return handle.(*H2)
}

-func InitializeH2(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H2) Initialize(params *BrotliEncoderParams) {
}

-func PrepareH2(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-var self *H2 = SelfH2(handle)
+func (h *H2) Prepare(one_shot bool, input_size uint, data []byte) {
var partial_prepare_threshold uint = (4 << 16) >> 7
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = HashBytesH2(data[i:])
-self.buckets_[key] = 0
+h.buckets_[key] = 0
}
} else {
/* It is not strictly necessary to fill this buffer here, but
not filling will make the results of the compression stochastic
(but correct). This is because random data would cause the
system to find accidentally good backward references here and there. */
var i int
-for i = 0; i < len(self.buckets_); i++ {
-self.buckets_[i] = 0
+for i = 0; i < len(h.buckets_); i++ {
+h.buckets_[i] = 0
}
}
}
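A note on the cutoff above (my arithmetic; the diff does not spell it out): (4 << 16) >> 7 is 2048, and the matching H4 constant (4 << 17) >> 7 is 4096, so the threshold scales with the size of the bucket table. For a one-shot input at or below the threshold, resetting only the buckets the input will actually touch is cheaper than zeroing the whole table; above it, the full reset wins. A minimal restatement of that decision, with hypothetical names:

// usePartialPrepare mirrors the branch in Prepare: the partial reset pays off
// only when the input covers at most 1/128 of the (4 << bucketBits) table.
func usePartialPrepare(oneShot bool, inputSize, bucketBits uint) bool {
	hashMapSize := uint(4) << bucketBits
	return oneShot && inputSize <= hashMapSize>>7
}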
@@ -85,15 +84,15 @@ func StoreRangeH2(handle HasherHandle, data []byte, mask uint, ix_start uint, ix
}
}

-func StitchToPreviousBlockH2(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H2) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= HashTypeLengthH2()-1 && position >= 3 {
/* Prepare the hashes for the three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
-StoreH2(handle, ringbuffer, ringbuffer_mask, position-3)
+StoreH2(h, ringbuffer, ringbuffer_mask, position-3)

-StoreH2(handle, ringbuffer, ringbuffer_mask, position-2)
-StoreH2(handle, ringbuffer, ringbuffer_mask, position-1)
+StoreH2(h, ringbuffer, ringbuffer_mask, position-2)
+StoreH2(h, ringbuffer, ringbuffer_mask, position-1)
}
}

h3.go: 19 changes (9 additions, 10 deletions)
@@ -39,19 +39,18 @@ func SelfH3(handle HasherHandle) *H3 {
return handle.(*H3)
}

-func InitializeH3(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H3) Initialize(params *BrotliEncoderParams) {
}

-func PrepareH3(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-var self *H3 = SelfH3(handle)
+func (h *H3) Prepare(one_shot bool, input_size uint, data []byte) {
var partial_prepare_threshold uint = (4 << 16) >> 7
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = HashBytesH3(data[i:])
for i := 0; i < int(2); i++ {
-self.buckets_[key:][i] = 0
+h.buckets_[key:][i] = 0
}
}
} else {
@@ -60,8 +59,8 @@ func PrepareH3(handle HasherHandle, one_shot bool, input_size uint, data []byte)
(but correct). This is because random data would cause the
system to find accidentally good backward references here and there. */
var i int
-for i = 0; i < len(self.buckets_); i++ {
-self.buckets_[i] = 0
+for i = 0; i < len(h.buckets_); i++ {
+h.buckets_[i] = 0
}
}
}
@@ -83,15 +82,15 @@ func StoreRangeH3(handle HasherHandle, data []byte, mask uint, ix
}
}

-func StitchToPreviousBlockH3(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H3) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= HashTypeLengthH3()-1 && position >= 3 {
/* Prepare the hashes for the three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
-StoreH3(handle, ringbuffer, ringbuffer_mask, position-3)
+StoreH3(h, ringbuffer, ringbuffer_mask, position-3)

-StoreH3(handle, ringbuffer, ringbuffer_mask, position-2)
-StoreH3(handle, ringbuffer, ringbuffer_mask, position-1)
+StoreH3(h, ringbuffer, ringbuffer_mask, position-2)
+StoreH3(h, ringbuffer, ringbuffer_mask, position-1)
}
}

h35.go: 41 changes (19 additions, 22 deletions)
@@ -42,42 +42,40 @@ func SelfH35(handle HasherHandle) *H35 {
return handle.(*H35)
}

-func InitializeH35(handle HasherHandle, params *BrotliEncoderParams) {
-var self *H35 = SelfH35(handle)
-self.ha = nil
-self.hb = nil
-self.params = params
+func (h *H35) Initialize(params *BrotliEncoderParams) {
+h.ha = nil
+h.hb = nil
+h.params = params
}

/* TODO: Initialization of the hashers is deferred to Prepare (and the params
are remembered here) because we don't get the one_shot and input_size params
here, which are needed to know their memory size. Instead, provide those
params to every hasher's Initialize. */
-func PrepareH35(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-var self *H35 = SelfH35(handle)
-if self.ha == nil {
+func (h *H35) Prepare(one_shot bool, input_size uint, data []byte) {
+if h.ha == nil {
var common_a *HasherCommon
var common_b *HasherCommon

-self.ha = new(H3)
-common_a = self.ha.Common()
-common_a.params = self.params.hasher
+h.ha = new(H3)
+common_a = h.ha.Common()
+common_a.params = h.params.hasher
common_a.is_prepared_ = false
common_a.dict_num_lookups = 0
common_a.dict_num_matches = 0
-InitializeH3(self.ha, self.params)
+h.ha.Initialize(h.params)

-self.hb = new(HROLLING_FAST)
-common_b = self.hb.Common()
-common_b.params = self.params.hasher
+h.hb = new(HROLLING_FAST)
+common_b = h.hb.Common()
+common_b.params = h.params.hasher
common_b.is_prepared_ = false
common_b.dict_num_lookups = 0
common_b.dict_num_matches = 0
-InitializeHROLLING_FAST(self.hb, self.params)
+h.hb.Initialize(h.params)
}

-PrepareH3(self.ha, one_shot, input_size, data)
-PrepareHROLLING_FAST(self.hb, one_shot, input_size, data)
+h.ha.Prepare(one_shot, input_size, data)
+h.hb.Prepare(one_shot, input_size, data)
}

func StoreH35(handle HasherHandle, data []byte, mask uint, ix uint) {
@@ -92,10 +90,9 @@ func StoreRangeH35(handle HasherHandle, data []byte, mask uint, ix_start uint, i
StoreRangeHROLLING_FAST(self.hb, data, mask, ix_start, ix_end)
}

-func StitchToPreviousBlockH35(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
-var self *H35 = SelfH35(handle)
-StitchToPreviousBlockH3(self.ha, num_bytes, position, ringbuffer, ring_buffer_mask)
-StitchToPreviousBlockHROLLING_FAST(self.hb, num_bytes, position, ringbuffer, ring_buffer_mask)
+func (h *H35) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
+h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
}
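Once both sub-hashers expose the same method set, the composite's delegation no longer depends on their concrete types. A minimal sketch, assuming the hypothetical HasherHandle interface from the top of this page (not part of this commit):

// stitchAll is a hypothetical helper: with interface-typed sub-hashers,
// H35's pairwise delegation collapses into one loop.
func stitchAll(subs []HasherHandle, num_bytes uint, position uint, ringbuffer []byte, mask uint) {
	for _, sub := range subs {
		sub.StitchToPreviousBlock(num_bytes, position, ringbuffer, mask)
	}
}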

func PrepareDistanceCacheH35(handle HasherHandle, distance_cache []int) {
h4.go: 19 changes (9 additions, 10 deletions)
@@ -39,19 +39,18 @@ func SelfH4(handle HasherHandle) *H4 {
return handle.(*H4)
}

-func InitializeH4(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H4) Initialize(params *BrotliEncoderParams) {
}

-func PrepareH4(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-var self *H4 = SelfH4(handle)
+func (h *H4) Prepare(one_shot bool, input_size uint, data []byte) {
var partial_prepare_threshold uint = (4 << 17) >> 7
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = HashBytesH4(data[i:])
for i := 0; i < int(4); i++ {
-self.buckets_[key:][i] = 0
+h.buckets_[key:][i] = 0
}
}
} else {
@@ -60,8 +59,8 @@ func PrepareH4(handle HasherHandle, one_shot bool, input_size uint, data []byte)
(but correct). This is because random data would cause the
system to find accidentally good backward references here and there. */
var i int
-for i = 0; i < len(self.buckets_); i++ {
-self.buckets_[i] = 0
+for i = 0; i < len(h.buckets_); i++ {
+h.buckets_[i] = 0
}
}
}
@@ -83,15 +82,15 @@ func StoreRangeH4(handle HasherHandle, data []byte, mask uint, ix
}
}

-func StitchToPreviousBlockH4(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H4) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= HashTypeLengthH4()-1 && position >= 3 {
/* Prepare the hashes for the three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
-StoreH4(handle, ringbuffer, ringbuffer_mask, position-3)
+StoreH4(h, ringbuffer, ringbuffer_mask, position-3)

-StoreH4(handle, ringbuffer, ringbuffer_mask, position-2)
-StoreH4(handle, ringbuffer, ringbuffer_mask, position-1)
+StoreH4(h, ringbuffer, ringbuffer_mask, position-2)
+StoreH4(h, ringbuffer, ringbuffer_mask, position-1)
}
}

h40.go: 29 changes (14 additions, 15 deletions)
@@ -53,18 +53,17 @@ func SelfH40(handle HasherHandle) *H40 {
return handle.(*H40)
}

-func InitializeH40(handle HasherHandle, params *BrotliEncoderParams) {
+func (h *H40) Initialize(params *BrotliEncoderParams) {
var q uint
if params.quality > 6 {
q = 7
} else {
q = 8
}
-SelfH40(handle).max_hops = q << uint(params.quality-4)
+h.max_hops = q << uint(params.quality-4)
}
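Worked values for the max_hops formula above (derived from the branch; which qualities actually select H40 is outside this diff):

// quality 4 -> 8 << 0 = 8    quality 5 -> 8 << 1 = 16   quality 6 -> 8 << 2 = 32
// quality 7 -> 7 << 3 = 56   quality 8 -> 7 << 4 = 112  quality 9 -> 7 << 5 = 224
// Higher quality allows longer walks along a hash chain before giving up.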

-func PrepareH40(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-var self *H40 = SelfH40(handle)
+func (h *H40) Prepare(one_shot bool, input_size uint, data []byte) {
var partial_prepare_threshold uint = (1 << 15) >> 6
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
@@ -73,24 +72,24 @@ func PrepareH40(handle HasherHandle, one_shot bool, input_size uint, data []byte
var bucket uint = HashBytesH40(data[i:])

/* See InitEmpty comment. */
-self.addr[bucket] = 0xCCCCCCCC
+h.addr[bucket] = 0xCCCCCCCC

-self.head[bucket] = 0xCCCC
+h.head[bucket] = 0xCCCC
}
} else {
/* Fill the |addr| array with the value 0xCCCCCCCC. Because of wrapping, the
position processed by the hasher never reaches 3GB + 64M; this makes all new
chains terminate after the first node. */
var i int
-for i = 0; i < len(self.addr); i++ {
-self.addr[i] = 0xCCCCCCCC
+for i = 0; i < len(h.addr); i++ {
+h.addr[i] = 0xCCCCCCCC
}

-self.head = [1 << 15]uint16{}
+h.head = [1 << 15]uint16{}
}

-self.tiny_hash = [65536]byte{}
-self.free_slot_idx = [1]uint16{}
+h.tiny_hash = [65536]byte{}
+h.free_slot_idx = [1]uint16{}
}
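The sentinel comment above checks out numerically; restated here with names of my choosing:

// The wrapping hasher never produces a position above
// 3GB + 64M = 3<<30 + 64<<20 = 3288334336, while the sentinel
// 0xCCCCCCCC = 3435973836 is larger. A chain entry holding the sentinel
// therefore never matches a real position, so new chains end at their
// first node, exactly as the comment in Prepare says.
const maxHasherPos = 3<<30 + 64<<20 // 3288334336
const chainEnd = 0xCCCCCCCC        // 3435973836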

/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
@@ -120,15 +119,15 @@ func StoreRangeH40(handle HasherHandle, data []byte, mask uint, ix_start uint, i
}
}

-func StitchToPreviousBlockH40(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+func (h *H40) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
if num_bytes >= HashTypeLengthH40()-1 && position >= 3 {
/* Prepare the hashes for the three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
-StoreH40(handle, ringbuffer, ring_buffer_mask, position-3)
+StoreH40(h, ringbuffer, ring_buffer_mask, position-3)

-StoreH40(handle, ringbuffer, ring_buffer_mask, position-2)
-StoreH40(handle, ringbuffer, ring_buffer_mask, position-1)
+StoreH40(h, ringbuffer, ring_buffer_mask, position-2)
+StoreH40(h, ringbuffer, ring_buffer_mask, position-1)
}
}

(Diffs for the remaining 10 changed files are not shown here.)