Diffstat (limited to 'vendor/github.com')
14 files changed, 738 insertions, 815 deletions
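The go-png-image-structure hunks below swap the dsoprea module for the superseriousbusiness fork, whose main change is replacing go-logging panic/recover handling with plain returned errors (for example, NewChunkSlice, PngSplitter.Chunks and ChunkSlice.FindExif now return errors). A minimal caller-side sketch, assuming only the signatures added in the new files below; the file path is illustrative:

package main

import (
	"fmt"

	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)

func main() {
	pmp := pngstructure.NewPngMediaParser()

	// ParseFile now returns an error instead of panicking
	// through dsoprea/go-logging.
	mc, err := pmp.ParseFile("image.png") // illustrative path
	if err != nil {
		panic(err)
	}

	// For PNGs the returned MediaContext is a *ChunkSlice.
	cs := mc.(*pngstructure.ChunkSlice)

	// FindExif now returns exif.ErrNoExif as a plain error
	// when no eXIf chunk is present, rather than panicking.
	chunk, err := cs.FindExif()
	if err != nil {
		fmt.Println("no exif chunk:", err)
		return
	}

	fmt.Printf("exif chunk holds %d bytes\n", chunk.Length)
}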
diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/chunk_decoder.go b/vendor/github.com/dsoprea/go-png-image-structure/v2/chunk_decoder.go deleted file mode 100644 index b5e0b1b16..000000000 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/chunk_decoder.go +++ /dev/null @@ -1,87 +0,0 @@ -package pngstructure - -import ( - "bytes" - "fmt" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -type ChunkDecoder struct { -} - -func NewChunkDecoder() *ChunkDecoder { - return new(ChunkDecoder) -} - -func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - switch c.Type { - case "IHDR": - ihdr, err := cd.decodeIHDR(c) - log.PanicIf(err) - - return ihdr, nil - } - - // We don't decode this particular type. - return nil, nil -} - -type ChunkIHDR struct { - Width uint32 - Height uint32 - BitDepth uint8 - ColorType uint8 - CompressionMethod uint8 - FilterMethod uint8 - InterlaceMethod uint8 -} - -func (ihdr *ChunkIHDR) String() string { - return fmt.Sprintf("IHDR<WIDTH=(%d) HEIGHT=(%d) DEPTH=(%d) COLOR-TYPE=(%d) COMP-METHOD=(%d) FILTER-METHOD=(%d) INTRLC-METHOD=(%d)>", ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod) -} - -func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (ihdr *ChunkIHDR, err error) { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - b := bytes.NewBuffer(c.Data) - - ihdr = new(ChunkIHDR) - - err = binary.Read(b, binary.BigEndian, &ihdr.Width) - log.PanicIf(err) - - err = binary.Read(b, binary.BigEndian, &ihdr.Height) - log.PanicIf(err) - - err = binary.Read(b, binary.BigEndian, &ihdr.BitDepth) - log.PanicIf(err) - - err = binary.Read(b, binary.BigEndian, &ihdr.ColorType) - log.PanicIf(err) - - err = binary.Read(b, binary.BigEndian, &ihdr.CompressionMethod) - log.PanicIf(err) - - err = binary.Read(b, binary.BigEndian, &ihdr.FilterMethod) - log.PanicIf(err) - - err = binary.Read(b, binary.BigEndian, &ihdr.InterlaceMethod) - log.PanicIf(err) - - return ihdr, nil -} diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/media_parser.go b/vendor/github.com/dsoprea/go-png-image-structure/v2/media_parser.go deleted file mode 100644 index c0e287365..000000000 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/media_parser.go +++ /dev/null @@ -1,118 +0,0 @@ -package pngstructure - -import ( - "bufio" - "bytes" - "image" - "io" - "os" - - "image/png" - - "github.com/dsoprea/go-logging" - "github.com/dsoprea/go-utility/v2/image" -) - -// PngMediaParser knows how to parse a PNG stream. -type PngMediaParser struct { -} - -// NewPngMediaParser returns a new `PngMediaParser` struct. -func NewPngMediaParser() *PngMediaParser { - - // TODO(dustin): Add test - - return new(PngMediaParser) -} - -// Parse parses a PNG stream given a `io.ReadSeeker`. -func (pmp *PngMediaParser) Parse(rs io.ReadSeeker, size int) (mc riimage.MediaContext, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - ps := NewPngSplitter() - - err = ps.readHeader(rs) - log.PanicIf(err) - - s := bufio.NewScanner(rs) - - // Since each segment can be any size, our buffer must be allowed to grow - // as large as the file. 
- buffer := []byte{} - s.Buffer(buffer, size) - s.Split(ps.Split) - - for s.Scan() != false { - } - - log.PanicIf(s.Err()) - - return ps.Chunks(), nil -} - -// ParseFile parses a PNG stream given a file-path. -func (pmp *PngMediaParser) ParseFile(filepath string) (mc riimage.MediaContext, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - f, err := os.Open(filepath) - log.PanicIf(err) - - defer f.Close() - - stat, err := f.Stat() - log.PanicIf(err) - - size := stat.Size() - - chunks, err := pmp.Parse(f, int(size)) - log.PanicIf(err) - - return chunks, nil -} - -// ParseBytes parses a PNG stream given a byte-slice. -func (pmp *PngMediaParser) ParseBytes(data []byte) (mc riimage.MediaContext, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - br := bytes.NewReader(data) - - chunks, err := pmp.Parse(br, len(data)) - log.PanicIf(err) - - return chunks, nil -} - -// LooksLikeFormat returns a boolean indicating whether the stream looks like a -// PNG image. -func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool { - return bytes.Compare(data[:len(PngSignature)], PngSignature[:]) == 0 -} - -// GetImage returns an image.Image-compatible struct. -func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) { - img, err = png.Decode(r) - log.PanicIf(err) - - return img, nil -} - -var ( - // Enforce interface conformance. - _ riimage.MediaParser = new(PngMediaParser) -) diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/png.go b/vendor/github.com/dsoprea/go-png-image-structure/v2/png.go deleted file mode 100644 index fbb022887..000000000 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/png.go +++ /dev/null @@ -1,416 +0,0 @@ -package pngstructure - -import ( - "bytes" - "errors" - "fmt" - "io" - - "encoding/binary" - "hash/crc32" - - "github.com/dsoprea/go-exif/v3" - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-logging" - "github.com/dsoprea/go-utility/v2/image" -) - -var ( - PngSignature = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'} - EXifChunkType = "eXIf" - IHDRChunkType = "IHDR" -) - -var ( - ErrNotPng = errors.New("not png data") - ErrCrcFailure = errors.New("crc failure") -) - -// ChunkSlice encapsulates a slice of chunks. -type ChunkSlice struct { - chunks []*Chunk -} - -func NewChunkSlice(chunks []*Chunk) *ChunkSlice { - if len(chunks) == 0 { - log.Panicf("ChunkSlice must be initialized with at least one chunk (IHDR)") - } else if chunks[0].Type != IHDRChunkType { - log.Panicf("first chunk in any ChunkSlice must be an IHDR") - } - - return &ChunkSlice{ - chunks: chunks, - } -} - -func NewPngChunkSlice() *ChunkSlice { - - ihdrChunk := &Chunk{ - Type: IHDRChunkType, - } - - ihdrChunk.UpdateCrc32() - - return NewChunkSlice([]*Chunk{ihdrChunk}) -} - -func (cs *ChunkSlice) String() string { - return fmt.Sprintf("ChunkSlize<LEN=(%d)>", len(cs.chunks)) -} - -// Chunks exposes the actual slice. -func (cs *ChunkSlice) Chunks() []*Chunk { - return cs.chunks -} - -// Write encodes and writes all chunks. -func (cs *ChunkSlice) WriteTo(w io.Writer) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - _, err = w.Write(PngSignature[:]) - log.PanicIf(err) - - // TODO(dustin): !! This should respect the safe-to-copy characteristic. 
- for _, c := range cs.chunks { - _, err := c.WriteTo(w) - log.PanicIf(err) - } - - return nil -} - -// Index returns a map of chunk types to chunk slices, grouping all like chunks. -func (cs *ChunkSlice) Index() (index map[string][]*Chunk) { - index = make(map[string][]*Chunk) - for _, c := range cs.chunks { - if grouped, found := index[c.Type]; found == true { - index[c.Type] = append(grouped, c) - } else { - index[c.Type] = []*Chunk{c} - } - } - - return index -} - -// FindExif returns the the segment that hosts the EXIF data. -func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - index := cs.Index() - - if chunks, found := index[EXifChunkType]; found == true { - return chunks[0], nil - } - - log.Panic(exif.ErrNoExif) - - // Never called. - return nil, nil -} - -// Exif returns an `exif.Ifd` instance with the existing tags. -func (cs *ChunkSlice) Exif() (rootIfd *exif.Ifd, data []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - chunk, err := cs.FindExif() - log.PanicIf(err) - - im, err := exifcommon.NewIfdMappingWithStandard() - log.PanicIf(err) - - ti := exif.NewTagIndex() - - // TODO(dustin): Refactor and support `exif.GetExifData()`. - - _, index, err := exif.Collect(im, ti, chunk.Data) - log.PanicIf(err) - - return index.RootIfd, chunk.Data, nil -} - -// ConstructExifBuilder returns an `exif.IfdBuilder` instance (needed for -// modifying) preloaded with all existing tags. -func (cs *ChunkSlice) ConstructExifBuilder() (rootIb *exif.IfdBuilder, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rootIfd, _, err := cs.Exif() - log.PanicIf(err) - - ib := exif.NewIfdBuilderFromExistingChain(rootIfd) - - return ib, nil -} - -// SetExif encodes and sets EXIF data into this segment. -func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Encode. - - ibe := exif.NewIfdByteEncoder() - - exifData, err := ibe.EncodeToExif(ib) - log.PanicIf(err) - - // Set. - - exifChunk, err := cs.FindExif() - if err == nil { - // EXIF chunk already exists. - - exifChunk.Data = exifData - exifChunk.Length = uint32(len(exifData)) - } else { - if log.Is(err, exif.ErrNoExif) != true { - log.Panic(err) - } - - // Add a EXIF chunk for the first time. - - exifChunk = &Chunk{ - Type: EXifChunkType, - Data: exifData, - Length: uint32(len(exifData)), - } - - // Insert it after the IHDR chunk (it's a reliably appropriate place to - // put it). - cs.chunks = append(cs.chunks[:1], append([]*Chunk{exifChunk}, cs.chunks[1:]...)...) - } - - exifChunk.UpdateCrc32() - - return nil -} - -// PngSplitter hosts the princpal `Split()` method uses by `bufio.Scanner`. -type PngSplitter struct { - chunks []*Chunk - currentOffset int - - doCheckCrc bool - crcErrors []string -} - -func (ps *PngSplitter) Chunks() *ChunkSlice { - return NewChunkSlice(ps.chunks) -} - -func (ps *PngSplitter) DoCheckCrc(doCheck bool) { - ps.doCheckCrc = doCheck -} - -func (ps *PngSplitter) CrcErrors() []string { - return ps.crcErrors -} - -func NewPngSplitter() *PngSplitter { - return &PngSplitter{ - chunks: make([]*Chunk, 0), - doCheckCrc: true, - crcErrors: make([]string, 0), - } -} - -// Chunk describes a single chunk. 
-type Chunk struct { - Offset int - Length uint32 - Type string - Data []byte - Crc uint32 -} - -func (c *Chunk) String() string { - return fmt.Sprintf("Chunk<OFFSET=(%d) LENGTH=(%d) TYPE=[%s] CRC=(%d)>", c.Offset, c.Length, c.Type, c.Crc) -} - -func calculateCrc32(chunk *Chunk) uint32 { - c := crc32.NewIEEE() - - c.Write([]byte(chunk.Type)) - c.Write(chunk.Data) - - return c.Sum32() -} - -func (c *Chunk) UpdateCrc32() { - c.Crc = calculateCrc32(c) -} - -func (c *Chunk) CheckCrc32() bool { - expected := calculateCrc32(c) - return c.Crc == expected -} - -// Bytes encodes and returns the bytes for this chunk. -func (c *Chunk) Bytes() []byte { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - if len(c.Data) != int(c.Length) { - log.Panicf("length of data not correct") - } - - preallocated := make([]byte, 0, 4+4+c.Length+4) - b := bytes.NewBuffer(preallocated) - - err := binary.Write(b, binary.BigEndian, c.Length) - log.PanicIf(err) - - _, err = b.Write([]byte(c.Type)) - log.PanicIf(err) - - if c.Data != nil { - _, err = b.Write(c.Data) - log.PanicIf(err) - } - - err = binary.Write(b, binary.BigEndian, c.Crc) - log.PanicIf(err) - - return b.Bytes() -} - -// Write encodes and writes the bytes for this chunk. -func (c *Chunk) WriteTo(w io.Writer) (count int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if len(c.Data) != int(c.Length) { - log.Panicf("length of data not correct") - } - - err = binary.Write(w, binary.BigEndian, c.Length) - log.PanicIf(err) - - _, err = w.Write([]byte(c.Type)) - log.PanicIf(err) - - _, err = w.Write(c.Data) - log.PanicIf(err) - - err = binary.Write(w, binary.BigEndian, c.Crc) - log.PanicIf(err) - - return 4 + len(c.Type) + len(c.Data) + 4, nil -} - -// readHeader verifies that the PNG header bytes appear next. -func (ps *PngSplitter) readHeader(r io.Reader) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - len_ := len(PngSignature) - header := make([]byte, len_) - - _, err = r.Read(header) - log.PanicIf(err) - - ps.currentOffset += len_ - - if bytes.Compare(header, PngSignature[:]) != 0 { - log.Panic(ErrNotPng) - } - - return nil -} - -// Split fulfills the `bufio.SplitFunc` function definition for -// `bufio.Scanner`. -func (ps *PngSplitter) Split(data []byte, atEOF bool) (advance int, token []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // We might have more than one chunk's worth, and, if `atEOF` is true, we - // won't be called again. We'll repeatedly try to read additional chunks, - // but, when we run out of the data we were given then we'll return the - // number of bytes fo rthe chunks we've already completely read. Then, - // we'll be called again from theend ofthose bytes, at which point we'll - // indicate that we don't yet have enough for another chunk, and we should - // be then called with more. 
- for { - len_ := len(data) - if len_ < 8 { - return advance, nil, nil - } - - length := binary.BigEndian.Uint32(data[:4]) - type_ := string(data[4:8]) - chunkSize := (8 + int(length) + 4) - - if len_ < chunkSize { - return advance, nil, nil - } - - crcIndex := 8 + length - crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4]) - - content := make([]byte, length) - copy(content, data[8:8+length]) - - c := &Chunk{ - Length: length, - Type: type_, - Data: content, - Crc: crc, - Offset: ps.currentOffset, - } - - ps.chunks = append(ps.chunks, c) - - if c.CheckCrc32() == false { - ps.crcErrors = append(ps.crcErrors, type_) - - if ps.doCheckCrc == true { - log.Panic(ErrCrcFailure) - } - } - - advance += chunkSize - ps.currentOffset += chunkSize - - data = data[chunkSize:] - } - - return advance, nil, nil -} - -var ( - // Enforce interface conformance. - _ riimage.MediaContext = new(ChunkSlice) -) diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/testing_common.go b/vendor/github.com/dsoprea/go-png-image-structure/v2/testing_common.go deleted file mode 100644 index 9df13a858..000000000 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/testing_common.go +++ /dev/null @@ -1,64 +0,0 @@ -package pngstructure - -import ( - "os" - "path" - - "github.com/dsoprea/go-logging" -) - -var ( - assetsPath = "" -) - -func getModuleRootPath() string { - moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH") - if moduleRootPath != "" { - return moduleRootPath - } - - currentWd, err := os.Getwd() - log.PanicIf(err) - - currentPath := currentWd - visited := make([]string, 0) - - for { - tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") - - _, err := os.Stat(tryStampFilepath) - if err != nil && os.IsNotExist(err) != true { - log.Panic(err) - } else if err == nil { - break - } - - visited = append(visited, tryStampFilepath) - - currentPath = path.Dir(currentPath) - if currentPath == "/" { - log.Panicf("could not find module-root: %v", visited) - } - } - - return currentPath -} - -func getTestAssetsPath() string { - if assetsPath == "" { - moduleRootPath := getModuleRootPath() - assetsPath = path.Join(moduleRootPath, "assets") - } - - return assetsPath -} - -func getTestBasicImageFilepath() string { - assetsPath := getTestAssetsPath() - return path.Join(assetsPath, "libpng.png") -} - -func getTestExifImageFilepath() string { - assetsPath := getTestAssetsPath() - return path.Join(assetsPath, "exif.png") -} diff --git a/vendor/github.com/superseriousbusiness/exif-terminator/logger.go b/vendor/github.com/superseriousbusiness/exif-terminator/logger.go deleted file mode 100644 index e607c55dd..000000000 --- a/vendor/github.com/superseriousbusiness/exif-terminator/logger.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - exif-terminator - Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see <http://www.gnu.org/licenses/>. 
-*/ - -package terminator - -import "fmt" - -var logger ErrorLogger - -func init() { - logger = &defaultErrorLogger{} -} - -// ErrorLogger denotes a generic error logging function. -type ErrorLogger interface { - Error(args ...interface{}) -} - -type defaultErrorLogger struct{} - -func (d *defaultErrorLogger) Error(args ...interface{}) { - fmt.Println(args...) -} - -// SetErrorLogger allows a user of the exif-terminator library -// to set the logger that will be used for error logging. -// -// If it is not set, the default error logger will be used, which -// just prints errors to stdout. -func SetErrorLogger(errorLogger ErrorLogger) { - logger = errorLogger -} diff --git a/vendor/github.com/superseriousbusiness/exif-terminator/png.go b/vendor/github.com/superseriousbusiness/exif-terminator/png.go index 4a1ac5bf1..774ec0ed6 100644 --- a/vendor/github.com/superseriousbusiness/exif-terminator/png.go +++ b/vendor/github.com/superseriousbusiness/exif-terminator/png.go @@ -19,10 +19,9 @@ package terminator import ( - "encoding/binary" "io" - pngstructure "github.com/dsoprea/go-png-image-structure/v2" + pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" ) type pngVisitor struct { @@ -45,49 +44,50 @@ func (v *pngVisitor) split(data []byte, atEOF bool) (int, []byte, error) { } } - // check if the splitter has any new chunks in it that we haven't written yet - chunkSlice := v.ps.Chunks() + // Check if the splitter now has + // any new chunks in it for us. + chunkSlice, err := v.ps.Chunks() + if err != nil { + return advance, token, err + } + + // Write each chunk by passing it + // through our custom write func, + // which strips out exif and fixes + // the CRC of each chunk. chunks := chunkSlice.Chunks() for i, chunk := range chunks { - // look through all the chunks in the splitter - if i > v.lastWrittenChunk { - // we've got a chunk we haven't written yet! write it... - if err := v.writeChunk(chunk); err != nil { - return advance, token, err - } - // then remove the data - chunk.Data = chunk.Data[:0] - // and update - v.lastWrittenChunk = i + if i <= v.lastWrittenChunk { + // Skip already + // written chunks. + continue } + + // Write this new chunk. + if err := v.writeChunk(chunk); err != nil { + return advance, token, err + } + v.lastWrittenChunk = i + + // Zero data; here you + // go garbage collector. + chunk.Data = nil } return advance, token, err } func (v *pngVisitor) writeChunk(chunk *pngstructure.Chunk) error { - if err := binary.Write(v.writer, binary.BigEndian, chunk.Length); err != nil { - return err - } - - if _, err := v.writer.Write([]byte(chunk.Type)); err != nil { - return err - } - if chunk.Type == pngstructure.EXifChunkType { - blank := make([]byte, len(chunk.Data)) - if _, err := v.writer.Write(blank); err != nil { - return err - } - } else { - if _, err := v.writer.Write(chunk.Data); err != nil { - return err - } + // Replace exif data + // with zero bytes. + clear(chunk.Data) } - if err := binary.Write(v.writer, binary.BigEndian, chunk.Crc); err != nil { - return err - } + // Fix CRC of each chunk. + chunk.UpdateCrc32() - return nil + // finally, write chunk to writer. 
+ _, err := chunk.WriteTo(v.writer) + return err } diff --git a/vendor/github.com/superseriousbusiness/exif-terminator/terminator.go b/vendor/github.com/superseriousbusiness/exif-terminator/terminator.go index 9d9e6e743..7dd3d9ad7 100644 --- a/vendor/github.com/superseriousbusiness/exif-terminator/terminator.go +++ b/vendor/github.com/superseriousbusiness/exif-terminator/terminator.go @@ -25,29 +25,34 @@ import ( "fmt" "io" - pngstructure "github.com/dsoprea/go-png-image-structure/v2" jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2" + pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" ) func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error) { - // to avoid keeping too much stuff in memory we want to pipe data directly + // To avoid keeping too much stuff + // in memory we want to pipe data + // directly to the reader. pipeReader, pipeWriter := io.Pipe() - // we don't know ahead of time how long segments might be: they could be as large as - // the file itself, so unfortunately we need to allocate a buffer here that'scanner as large - // as the file + // We don't know ahead of time how long + // segments might be: they could be as + // large as the file itself, so we need + // a buffer with generous overhead. scanner := bufio.NewScanner(in) scanner.Buffer([]byte{}, fileSize) - var err error + var err error switch mediaType { case "image/jpeg", "jpeg", "jpg": err = terminateJpeg(scanner, pipeWriter, fileSize) + case "image/webp", "webp": err = terminateWebp(scanner, pipeWriter) + case "image/png", "png": - // for pngs we need to skip the header bytes, so read them in - // and check we're really dealing with a png here + // For pngs we need to skip the header bytes, so read + // them in and check we're really dealing with a png. header := make([]byte, len(pngstructure.PngSignature)) if _, headerError := in.Read(header); headerError != nil { err = headerError @@ -67,68 +72,87 @@ func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error) return pipeReader, err } -func terminateJpeg(scanner *bufio.Scanner, writer io.WriteCloser, expectedFileSize int) error { - // jpeg visitor is where the spicy hack of streaming the de-exifed data is contained +func terminateJpeg(scanner *bufio.Scanner, writer *io.PipeWriter, expectedFileSize int) error { v := &jpegVisitor{ writer: writer, expectedFileSize: expectedFileSize, } - // provide the visitor to the splitter so that it triggers on every section scan + // Provide the visitor to the splitter so + // that it triggers on every section scan. js := jpegstructure.NewJpegSplitter(v) - // the visitor also needs to read back the list of segments: for this it needs - // to know what jpeg splitter it's attached to, so give it a pointer to the splitter + // The visitor also needs to read back the + // list of segments: for this it needs to + // know what jpeg splitter it's attached to, + // so give it a pointer to the splitter. v.js = js - // use the jpeg splitters 'split' function, which satisfies the bufio.SplitFunc interface + // Jpeg visitor's 'split' function + // satisfies bufio.SplitFunc{}. 
scanner.Split(js.Split) - scanAndClose(scanner, writer) + go scanAndClose(scanner, writer) return nil } -func terminateWebp(scanner *bufio.Scanner, writer io.WriteCloser) error { +func terminateWebp(scanner *bufio.Scanner, writer *io.PipeWriter) error { v := &webpVisitor{ writer: writer, } - // use the webp visitor's 'split' function, which satisfies the bufio.SplitFunc interface + // Webp visitor's 'split' function + // satisfies bufio.SplitFunc{}. scanner.Split(v.split) - scanAndClose(scanner, writer) + go scanAndClose(scanner, writer) return nil } -func terminatePng(scanner *bufio.Scanner, writer io.WriteCloser) error { +func terminatePng(scanner *bufio.Scanner, writer *io.PipeWriter) error { ps := pngstructure.NewPngSplitter() + // Don't bother checking CRC; + // we're overwriting it anyway. + ps.DoCheckCrc(false) + v := &pngVisitor{ ps: ps, writer: writer, lastWrittenChunk: -1, } - // use the png visitor's 'split' function, which satisfies the bufio.SplitFunc interface + // Png visitor's 'split' function + // satisfies bufio.SplitFunc{}. scanner.Split(v.split) - scanAndClose(scanner, writer) + go scanAndClose(scanner, writer) return nil } -func scanAndClose(scanner *bufio.Scanner, writer io.WriteCloser) { - // scan asynchronously until there's nothing left to scan, and then close the writer - // so that the reader on the other side knows that we're done - // - // due to the nature of io.Pipe, writing won't actually work - // until the pipeReader starts being read by the caller, which - // is why we do this asynchronously - go func() { - defer writer.Close() - for scanner.Scan() { - } - if scanner.Err() != nil { - logger.Error(scanner.Err()) - } +// scanAndClose scans through the given scanner until there's +// nothing left to scan, and then closes the writer so that the +// reader on the other side of the pipe knows that we're done. +// +// Any error encountered when scanning will be logged by terminator. +// +// Due to the nature of io.Pipe, writing won't actually work +// until the pipeReader starts being read by the caller, which +// is why this function should always be called asynchronously. +func scanAndClose(scanner *bufio.Scanner, writer *io.PipeWriter) { + var err error + + defer func() { + // Always close writer, using returned + // scanner error (if any). If err is nil + // then the standard io.EOF will be used. + // (this will not overwrite existing). + writer.CloseWithError(err) }() + + for scanner.Scan() { + } + + // Set error on return. 
+ err = scanner.Err() } diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/.MODULE_ROOT b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT index e69de29bb..e69de29bb 100644 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/.MODULE_ROOT +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/LICENSE b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE index 163291ed6..163291ed6 100644 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/LICENSE +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go new file mode 100644 index 000000000..518bc91ad --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go @@ -0,0 +1,81 @@ +package pngstructure + +import ( + "bytes" + "fmt" + + "encoding/binary" +) + +type ChunkDecoder struct { +} + +func NewChunkDecoder() *ChunkDecoder { + return new(ChunkDecoder) +} + +func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) { + switch c.Type { + case "IHDR": + return cd.decodeIHDR(c) + } + + // We don't decode this type. + return nil, nil +} + +type ChunkIHDR struct { + Width uint32 + Height uint32 + BitDepth uint8 + ColorType uint8 + CompressionMethod uint8 + FilterMethod uint8 + InterlaceMethod uint8 +} + +func (ihdr *ChunkIHDR) String() string { + return fmt.Sprintf("IHDR<WIDTH=(%d) HEIGHT=(%d) DEPTH=(%d) COLOR-TYPE=(%d) COMP-METHOD=(%d) FILTER-METHOD=(%d) INTRLC-METHOD=(%d)>", + ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod, + ) +} + +func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (*ChunkIHDR, error) { + var ( + b = bytes.NewBuffer(c.Data) + ihdr = new(ChunkIHDR) + readf = func(data interface{}) error { + return binary.Read(b, binary.BigEndian, data) + } + ) + + if err := readf(&ihdr.Width); err != nil { + return nil, err + } + + if err := readf(&ihdr.Height); err != nil { + return nil, err + } + + if err := readf(&ihdr.BitDepth); err != nil { + return nil, err + } + + if err := readf(&ihdr.ColorType); err != nil { + return nil, err + } + + if err := readf(&ihdr.CompressionMethod); err != nil { + return nil, err + } + + if err := readf(&ihdr.FilterMethod); err != nil { + return nil, err + } + + if err := readf(&ihdr.InterlaceMethod); err != nil { + return nil, err + } + + return ihdr, nil +} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go new file mode 100644 index 000000000..4c8421905 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go @@ -0,0 +1,85 @@ +package pngstructure + +import ( + "bufio" + "bytes" + "image" + "io" + "os" + + "image/png" + + riimage "github.com/dsoprea/go-utility/v2/image" +) + +// PngMediaParser knows how to parse a PNG stream. +type PngMediaParser struct { +} + +// NewPngMediaParser returns a new `PngMediaParser`. +func NewPngMediaParser() riimage.MediaParser { + return new(PngMediaParser) +} + +// Parse parses a PNG stream given a `io.ReadSeeker`. 
+func (pmp *PngMediaParser) Parse( + rs io.ReadSeeker, + size int, +) (riimage.MediaContext, error) { + ps := NewPngSplitter() + if err := ps.readHeader(rs); err != nil { + return nil, err + } + + s := bufio.NewScanner(rs) + + // Since each segment can be any + // size, our buffer must be allowed + // to grow as large as the file. + buffer := []byte{} + s.Buffer(buffer, size) + s.Split(ps.Split) + + for s.Scan() { + } + + if err := s.Err(); err != nil { + return nil, err + } + + return ps.Chunks() +} + +// ParseFile parses a PNG stream given a file-path. +func (pmp *PngMediaParser) ParseFile(filepath string) (riimage.MediaContext, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer f.Close() + + stat, err := f.Stat() + if err != nil { + return nil, err + } + + size := stat.Size() + return pmp.Parse(f, int(size)) +} + +// ParseBytes parses a PNG stream given a byte-slice. +func (pmp *PngMediaParser) ParseBytes(data []byte) (riimage.MediaContext, error) { + br := bytes.NewReader(data) + return pmp.Parse(br, len(data)) +} + +// LooksLikeFormat returns a boolean indicating +// whether the stream looks like a PNG image. +func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool { + return bytes.Equal(data[:len(PngSignature)], PngSignature[:]) +} + +// GetImage returns an image.Image-compatible struct. +func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) { + return png.Decode(r) +} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go new file mode 100644 index 000000000..dfe773b71 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go @@ -0,0 +1,386 @@ +package pngstructure + +import ( + "bytes" + "errors" + "fmt" + "io" + + "encoding/binary" + "hash/crc32" + + "github.com/dsoprea/go-exif/v3" + exifcommon "github.com/dsoprea/go-exif/v3/common" + riimage "github.com/dsoprea/go-utility/v2/image" +) + +var ( + PngSignature = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'} + EXifChunkType = "eXIf" + IHDRChunkType = "IHDR" +) + +var ( + ErrNotPng = errors.New("not png data") + ErrCrcFailure = errors.New("crc failure") +) + +// ChunkSlice encapsulates a slice of chunks. +type ChunkSlice struct { + chunks []*Chunk +} + +func NewChunkSlice(chunks []*Chunk) (*ChunkSlice, error) { + if len(chunks) == 0 { + err := errors.New("ChunkSlice must be initialized with at least one chunk (IHDR)") + return nil, err + } else if chunks[0].Type != IHDRChunkType { + err := errors.New("first chunk in any ChunkSlice must be an IHDR") + return nil, err + } + + return &ChunkSlice{chunks}, nil +} + +func NewPngChunkSlice() (*ChunkSlice, error) { + ihdrChunk := &Chunk{ + Type: IHDRChunkType, + } + + ihdrChunk.UpdateCrc32() + + return NewChunkSlice([]*Chunk{ihdrChunk}) +} + +func (cs *ChunkSlice) String() string { + return fmt.Sprintf("ChunkSlize<LEN=(%d)>", len(cs.chunks)) +} + +// Chunks exposes the actual slice. +func (cs *ChunkSlice) Chunks() []*Chunk { + return cs.chunks +} + +// Write encodes and writes all chunks. +func (cs *ChunkSlice) WriteTo(w io.Writer) error { + if _, err := w.Write(PngSignature[:]); err != nil { + return err + } + + // TODO(dustin): !! This should respect + // the safe-to-copy characteristic. + for _, c := range cs.chunks { + if _, err := c.WriteTo(w); err != nil { + return err + } + } + + return nil +} + +// Index returns a map of chunk types to chunk slices, grouping all like chunks. 
+func (cs *ChunkSlice) Index() (index map[string][]*Chunk) { + index = make(map[string][]*Chunk) + for _, c := range cs.chunks { + if grouped, found := index[c.Type]; found { + index[c.Type] = append(grouped, c) + } else { + index[c.Type] = []*Chunk{c} + } + } + + return index +} + +// FindExif returns the the segment that hosts the EXIF data. +func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) { + index := cs.Index() + if chunks, found := index[EXifChunkType]; found { + return chunks[0], nil + } + + return nil, exif.ErrNoExif +} + +// Exif returns an `exif.Ifd` instance with the existing tags. +func (cs *ChunkSlice) Exif() (*exif.Ifd, []byte, error) { + chunk, err := cs.FindExif() + if err != nil { + return nil, nil, err + } + + im, err := exifcommon.NewIfdMappingWithStandard() + if err != nil { + return nil, nil, err + } + + ti := exif.NewTagIndex() + + _, index, err := exif.Collect(im, ti, chunk.Data) + if err != nil { + return nil, nil, err + } + + return index.RootIfd, chunk.Data, nil +} + +// ConstructExifBuilder returns an `exif.IfdBuilder` instance +// (needed for modifying) preloaded with all existing tags. +func (cs *ChunkSlice) ConstructExifBuilder() (*exif.IfdBuilder, error) { + rootIfd, _, err := cs.Exif() + if err != nil { + return nil, err + } + + return exif.NewIfdBuilderFromExistingChain(rootIfd), nil +} + +// SetExif encodes and sets EXIF data into this segment. +func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) error { + // Encode. + + ibe := exif.NewIfdByteEncoder() + + exifData, err := ibe.EncodeToExif(ib) + if err != nil { + return err + } + + // Set. + + exifChunk, err := cs.FindExif() + + switch { + case err == nil: + // EXIF chunk already exists. + exifChunk.Data = exifData + exifChunk.Length = uint32(len(exifData)) + + case errors.Is(err, exif.ErrNoExif): + // Add a EXIF chunk for the first time. + exifChunk = &Chunk{ + Type: EXifChunkType, + Data: exifData, + Length: uint32(len(exifData)), + } + + // Insert exif after the IHDR chunk; it's + // a reliably appropriate place to put it. + cs.chunks = append( + cs.chunks[:1], + append( + []*Chunk{exifChunk}, + cs.chunks[1:]..., + )..., + ) + + default: + return err + } + + exifChunk.UpdateCrc32() + return nil +} + +// PngSplitter hosts the princpal `Split()` +// method uses by `bufio.Scanner`. +type PngSplitter struct { + chunks []*Chunk + currentOffset int + + doCheckCrc bool + crcErrors []string +} + +func (ps *PngSplitter) Chunks() (*ChunkSlice, error) { + return NewChunkSlice(ps.chunks) +} + +func (ps *PngSplitter) DoCheckCrc(doCheck bool) { + ps.doCheckCrc = doCheck +} + +func (ps *PngSplitter) CrcErrors() []string { + return ps.crcErrors +} + +func NewPngSplitter() *PngSplitter { + return &PngSplitter{ + chunks: make([]*Chunk, 0), + doCheckCrc: true, + crcErrors: make([]string, 0), + } +} + +// Chunk describes a single chunk. +type Chunk struct { + Offset int + Length uint32 + Type string + Data []byte + Crc uint32 +} + +func (c *Chunk) String() string { + return fmt.Sprintf("Chunk<OFFSET=(%d) LENGTH=(%d) TYPE=[%s] CRC=(%d)>", c.Offset, c.Length, c.Type, c.Crc) +} + +func calculateCrc32(chunk *Chunk) uint32 { + c := crc32.NewIEEE() + + c.Write([]byte(chunk.Type)) + c.Write(chunk.Data) + + return c.Sum32() +} + +func (c *Chunk) UpdateCrc32() { + c.Crc = calculateCrc32(c) +} + +func (c *Chunk) CheckCrc32() bool { + expected := calculateCrc32(c) + return c.Crc == expected +} + +// Bytes encodes and returns the bytes for this chunk. 
+func (c *Chunk) Bytes() ([]byte, error) { + if len(c.Data) != int(c.Length) { + return nil, errors.New("length of data not correct") + } + b := make([]byte, 0, 4+4+c.Length+4) + b = binary.BigEndian.AppendUint32(b, c.Length) + b = append(b, c.Type...) + b = append(b, c.Data...) + b = binary.BigEndian.AppendUint32(b, c.Crc) + return b, nil +} + +// Write encodes and writes the bytes for this chunk. +func (c *Chunk) WriteTo(w io.Writer) (int, error) { + if len(c.Data) != int(c.Length) { + return 0, errors.New("length of data not correct") + } + + var n int + + b := make([]byte, 4) // uint32 buf + + binary.BigEndian.PutUint32(b, c.Length) + if nn, err := w.Write(b); err != nil { + return n + nn, err + } + + n += len(b) + + if nn, err := io.WriteString(w, c.Type); err != nil { + return n + nn, err + } + + n += len(c.Type) + + if nn, err := w.Write(c.Data); err != nil { + return n + nn, err + } + + n += len(c.Data) + + binary.BigEndian.PutUint32(b, c.Crc) + if nn, err := w.Write(b); err != nil { + return n + nn, err + } + + n += len(b) + + return n, nil +} + +// readHeader verifies that the PNG header bytes appear next. +func (ps *PngSplitter) readHeader(r io.Reader) error { + var ( + sigLen = len(PngSignature) + header = make([]byte, sigLen) + ) + + if _, err := r.Read(header); err != nil { + return err + } + + ps.currentOffset += sigLen + if !bytes.Equal(header, PngSignature[:]) { + return ErrNotPng + } + + return nil +} + +// Split fulfills the `bufio.SplitFunc` +// function definition for `bufio.Scanner`. +func (ps *PngSplitter) Split( + data []byte, + atEOF bool, +) ( + advance int, + token []byte, + err error, +) { + // We might have more than one chunk's worth, and, + // if `atEOF` is true, we won't be called again. + // We'll repeatedly try to read additional chunks, + // but, when we run out of the data we were given + // then we'll return the number of bytes for the + // chunks we've already completely read. Then, we'll + // be called again from the end ofthose bytes, at + // which point we'll indicate that we don't yet have + // enough for another chunk, and we should be then + // called with more. + for { + len_ := len(data) + if len_ < 8 { + return advance, nil, nil + } + + length := binary.BigEndian.Uint32(data[:4]) + type_ := string(data[4:8]) + chunkSize := (8 + int(length) + 4) + + if len_ < chunkSize { + return advance, nil, nil + } + + crcIndex := 8 + length + crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4]) + + content := make([]byte, length) + copy(content, data[8:8+length]) + + c := &Chunk{ + Length: length, + Type: type_, + Data: content, + Crc: crc, + Offset: ps.currentOffset, + } + + ps.chunks = append(ps.chunks, c) + + if !c.CheckCrc32() { + ps.crcErrors = append(ps.crcErrors, type_) + + if ps.doCheckCrc { + err = ErrCrcFailure + return + } + } + + advance += chunkSize + ps.currentOffset += chunkSize + + data = data[chunkSize:] + } +} + +var ( + // Enforce interface conformance. 
+ _ riimage.MediaContext = new(ChunkSlice) +) diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go new file mode 100644 index 000000000..42f28d282 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go @@ -0,0 +1,77 @@ +package pngstructure + +import ( + "fmt" + "os" + "path" +) + +var ( + assetsPath = "assets" +) + +func getModuleRootPath() (string, error) { + moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH") + if moduleRootPath != "" { + return moduleRootPath, nil + } + + currentWd, err := os.Getwd() + if err != nil { + return "", err + } + + currentPath := currentWd + visited := make([]string, 0) + + for { + tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") + + _, err := os.Stat(tryStampFilepath) + if err != nil && !os.IsNotExist(err) { + return "", err + } else if err == nil { + break + } + + visited = append(visited, tryStampFilepath) + + currentPath = path.Dir(currentPath) + if currentPath == "/" { + return "", fmt.Errorf("could not find module-root: %v", visited) + } + } + + return currentPath, nil +} + +func getTestAssetsPath() (string, error) { + if assetsPath == "" { + moduleRootPath, err := getModuleRootPath() + if err != nil { + return "", err + } + + assetsPath = path.Join(moduleRootPath, "assets") + } + + return assetsPath, nil +} + +func getTestBasicImageFilepath() (string, error) { + assetsPath, err := getTestAssetsPath() + if err != nil { + return "", err + } + + return path.Join(assetsPath, "libpng.png"), nil +} + +func getTestExifImageFilepath() (string, error) { + assetsPath, err := getTestAssetsPath() + if err != nil { + return "", err + } + + return path.Join(assetsPath, "exif.png"), nil +} diff --git a/vendor/github.com/dsoprea/go-png-image-structure/v2/utility.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go index dbff145a6..cac6020f2 100644 --- a/vendor/github.com/dsoprea/go-png-image-structure/v2/utility.go +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go @@ -3,8 +3,6 @@ package pngstructure import ( "bytes" "fmt" - - "github.com/dsoprea/go-logging" ) func DumpBytes(data []byte) { @@ -32,34 +30,38 @@ func DumpBytesClause(data []byte) { fmt.Printf(" }\n") } -func DumpBytesToString(data []byte) string { +func DumpBytesToString(data []byte) (string, error) { b := new(bytes.Buffer) for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("%02x", x)) - log.PanicIf(err) + if _, err := b.WriteString(fmt.Sprintf("%02x", x)); err != nil { + return "", err + } if i < len(data)-1 { - _, err := b.WriteRune(' ') - log.PanicIf(err) + if _, err := b.WriteRune(' '); err != nil { + return "", err + } } } - return b.String() + return b.String(), nil } -func DumpBytesClauseToString(data []byte) string { +func DumpBytesClauseToString(data []byte) (string, error) { b := new(bytes.Buffer) for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("0x%02x", x)) - log.PanicIf(err) + if _, err := b.WriteString(fmt.Sprintf("0x%02x", x)); err != nil { + return "", err + } if i < len(data)-1 { - _, err := b.WriteString(", ") - log.PanicIf(err) + if _, err := b.WriteString(", "); err != nil { + return "", err + } } } - return b.String() + return b.String(), nil } |
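For context on how the exif-terminator changes above fit together: Terminate still returns a reader backed by io.Pipe, but scanAndClose now runs as its own goroutine per format and surfaces scanner errors through CloseWithError rather than only logging them, so callers see the failure when draining the pipe. A minimal usage sketch against the exported Terminate signature shown in this diff; file names are illustrative:

package main

import (
	"fmt"
	"io"
	"os"

	terminator "github.com/superseriousbusiness/exif-terminator"
)

func main() {
	in, err := os.Open("photo.png") // illustrative path
	if err != nil {
		panic(err)
	}
	defer in.Close()

	stat, err := in.Stat()
	if err != nil {
		panic(err)
	}

	// Terminate returns a reader backed by io.Pipe; the stripping
	// goroutine only makes progress while this reader is drained,
	// and any scanner error arrives here via CloseWithError.
	out, err := terminator.Terminate(in, int(stat.Size()), "image/png")
	if err != nil {
		panic(err)
	}

	clean, err := os.Create("photo-clean.png") // illustrative path
	if err != nil {
		panic(err)
	}
	defer clean.Close()

	n, err := io.Copy(clean, out)
	if err != nil {
		panic(err)
	}

	fmt.Printf("wrote %d exif-free bytes\n", n)
}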