| author | 2022-01-23 14:41:31 +0100 |
|---|---|
| committer | 2022-01-23 14:41:31 +0100 |
| commit | 7d024ce74d29a14bc8db60495751e674bdb24463 (patch) |
| tree | 6d6b0cde4a2245a2539e7251c484bcaf00291056 /vendor/github.com/dsoprea/go-exif/v3/common |
| parent | pass reader around instead of []byte (diff) |
| download | gotosocial-7d024ce74d29a14bc8db60495751e674bdb24463.tar.xz |
use exif-terminator
Diffstat (limited to 'vendor/github.com/dsoprea/go-exif/v3/common')
7 files changed, 2386 insertions, 0 deletions
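For orientation, here is a minimal usage sketch of the IFD mapping added in `ifd.go` below. The `main` wrapper and literal path phrase are illustrative assumptions, not part of the vendored code; the import path is the vendored one and error handling is abbreviated.

```go
package main

import (
	"fmt"

	exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
	// Preload the standard IFD tree (IFD, IFD/Exif, IFD/Exif/Iop, IFD/GPSInfo).
	im, err := exifcommon.NewIfdMappingWithStandard()
	if err != nil {
		panic(err)
	}

	// Resolve an (optionally indexed) path phrase into its tag-ID lineage.
	lineage, err := im.ResolvePath("IFD0/Exif")
	if err != nil {
		panic(err)
	}

	for _, part := range lineage {
		fmt.Println(part)
		// IfdTagIdAndIndex<NAME=[IFD] ID=(0000) INDEX=(0)>
		// IfdTagIdAndIndex<NAME=[Exif] ID=(8769) INDEX=(0)>
	}

	// Both indices are zero here, so the fully-qualified and unindexed
	// phrases coincide: "IFD/Exif".
	fmt.Println(im.FqPathPhraseFromLineage(lineage))
	fmt.Println(im.PathPhraseFromLineage(lineage))
}
```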
diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go b/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go new file mode 100644 index 000000000..01886e966 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go @@ -0,0 +1,651 @@ +package exifcommon + +import ( +	"errors" +	"fmt" +	"strings" + +	"github.com/dsoprea/go-logging" +) + +var ( +	ifdLogger = log.NewLogger("exifcommon.ifd") +) + +var ( +	ErrChildIfdNotMapped = errors.New("no child-IFD for that tag-ID under parent") +) + +// MappedIfd is one node in the IFD-mapping. +type MappedIfd struct { +	ParentTagId uint16 +	Placement   []uint16 +	Path        []string + +	Name     string +	TagId    uint16 +	Children map[uint16]*MappedIfd +} + +// String returns a descriptive string. +func (mi *MappedIfd) String() string { +	pathPhrase := mi.PathPhrase() +	return fmt.Sprintf("MappedIfd<(0x%04X) [%s] PATH=[%s]>", mi.TagId, mi.Name, pathPhrase) +} + +// PathPhrase returns a non-fully-qualified IFD path. +func (mi *MappedIfd) PathPhrase() string { +	return strings.Join(mi.Path, "/") +} + +// TODO(dustin): Refactor this to use IfdIdentity structs. + +// IfdMapping describes all of the IFDs that we currently recognize. +type IfdMapping struct { +	rootNode *MappedIfd +} + +// NewIfdMapping returns a new IfdMapping struct. +func NewIfdMapping() (ifdMapping *IfdMapping) { +	rootNode := &MappedIfd{ +		Path:     make([]string, 0), +		Children: make(map[uint16]*MappedIfd), +	} + +	return &IfdMapping{ +		rootNode: rootNode, +	} +} + +// NewIfdMappingWithStandard retruns a new IfdMapping struct preloaded with the +// standard IFDs. +func NewIfdMappingWithStandard() (ifdMapping *IfdMapping, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	im := NewIfdMapping() + +	err = LoadStandardIfds(im) +	log.PanicIf(err) + +	return im, nil +} + +// Get returns the node given the path slice. +func (im *IfdMapping) Get(parentPlacement []uint16) (childIfd *MappedIfd, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ptr := im.rootNode +	for _, tagId := range parentPlacement { +		if descendantPtr, found := ptr.Children[tagId]; found == false { +			log.Panicf("ifd child with tag-ID (%04x) not registered: [%s]", tagId, ptr.PathPhrase()) +		} else { +			ptr = descendantPtr +		} +	} + +	return ptr, nil +} + +// GetWithPath returns the node given the path string. +func (im *IfdMapping) GetWithPath(pathPhrase string) (mi *MappedIfd, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	if pathPhrase == "" { +		log.Panicf("path-phrase is empty") +	} + +	path := strings.Split(pathPhrase, "/") +	ptr := im.rootNode + +	for _, name := range path { +		var hit *MappedIfd +		for _, mi := range ptr.Children { +			if mi.Name == name { +				hit = mi +				break +			} +		} + +		if hit == nil { +			log.Panicf("ifd child with name [%s] not registered: [%s]", name, ptr.PathPhrase()) +		} + +		ptr = hit +	} + +	return ptr, nil +} + +// GetChild is a convenience function to get the child path for a given parent +// placement and child tag-ID. 
+func (im *IfdMapping) GetChild(parentPathPhrase string, tagId uint16) (mi *MappedIfd, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	mi, err = im.GetWithPath(parentPathPhrase) +	log.PanicIf(err) + +	for _, childMi := range mi.Children { +		if childMi.TagId == tagId { +			return childMi, nil +		} +	} + +	// Whether or not an IFD is defined in data, such an IFD is not registered +	// and would be unknown. +	log.Panic(ErrChildIfdNotMapped) +	return nil, nil +} + +// IfdTagIdAndIndex represents a specific part of the IFD path. +// +// This is a legacy type. +type IfdTagIdAndIndex struct { +	Name  string +	TagId uint16 +	Index int +} + +// String returns a descriptive string. +func (itii IfdTagIdAndIndex) String() string { +	return fmt.Sprintf("IfdTagIdAndIndex<NAME=[%s] ID=(%04x) INDEX=(%d)>", itii.Name, itii.TagId, itii.Index) +} + +// ResolvePath takes a list of names, which can also be suffixed with indices +// (to identify the second, third, etc.. sibling IFD) and returns a list of +// tag-IDs and those indices. +// +// Example: +// +// - IFD/Exif/Iop +// - IFD0/Exif/Iop +// +// This is the only call that supports adding the numeric indices. +func (im *IfdMapping) ResolvePath(pathPhrase string) (lineage []IfdTagIdAndIndex, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	pathPhrase = strings.TrimSpace(pathPhrase) + +	if pathPhrase == "" { +		log.Panicf("can not resolve empty path-phrase") +	} + +	path := strings.Split(pathPhrase, "/") +	lineage = make([]IfdTagIdAndIndex, len(path)) + +	ptr := im.rootNode +	empty := IfdTagIdAndIndex{} +	for i, name := range path { +		indexByte := name[len(name)-1] +		index := 0 +		if indexByte >= '0' && indexByte <= '9' { +			index = int(indexByte - '0') +			name = name[:len(name)-1] +		} + +		itii := IfdTagIdAndIndex{} +		for _, mi := range ptr.Children { +			if mi.Name != name { +				continue +			} + +			itii.Name = name +			itii.TagId = mi.TagId +			itii.Index = index + +			ptr = mi + +			break +		} + +		if itii == empty { +			log.Panicf("ifd child with name [%s] not registered: [%s]", name, pathPhrase) +		} + +		lineage[i] = itii +	} + +	return lineage, nil +} + +// FqPathPhraseFromLineage returns the fully-qualified IFD path from the slice. +func (im *IfdMapping) FqPathPhraseFromLineage(lineage []IfdTagIdAndIndex) (fqPathPhrase string) { +	fqPathParts := make([]string, len(lineage)) +	for i, itii := range lineage { +		if itii.Index > 0 { +			fqPathParts[i] = fmt.Sprintf("%s%d", itii.Name, itii.Index) +		} else { +			fqPathParts[i] = itii.Name +		} +	} + +	return strings.Join(fqPathParts, "/") +} + +// PathPhraseFromLineage returns the non-fully-qualified IFD path from the +// slice. +func (im *IfdMapping) PathPhraseFromLineage(lineage []IfdTagIdAndIndex) (pathPhrase string) { +	pathParts := make([]string, len(lineage)) +	for i, itii := range lineage { +		pathParts[i] = itii.Name +	} + +	return strings.Join(pathParts, "/") +} + +// StripPathPhraseIndices returns a non-fully-qualified path-phrase (no +// indices). 
+func (im *IfdMapping) StripPathPhraseIndices(pathPhrase string) (strippedPathPhrase string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	lineage, err := im.ResolvePath(pathPhrase) +	log.PanicIf(err) + +	strippedPathPhrase = im.PathPhraseFromLineage(lineage) +	return strippedPathPhrase, nil +} + +// Add puts the given IFD at the given position of the tree. The position of the +// tree is referred to as the placement and is represented by a set of tag-IDs, +// where the leftmost is the root tag and the tags going to the right are +// progressive descendants. +func (im *IfdMapping) Add(parentPlacement []uint16, tagId uint16, name string) (err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): !! It would be nicer to provide a list of names in the placement rather than tag-IDs. + +	ptr, err := im.Get(parentPlacement) +	log.PanicIf(err) + +	path := make([]string, len(parentPlacement)+1) +	if len(parentPlacement) > 0 { +		copy(path, ptr.Path) +	} + +	path[len(path)-1] = name + +	placement := make([]uint16, len(parentPlacement)+1) +	if len(placement) > 0 { +		copy(placement, ptr.Placement) +	} + +	placement[len(placement)-1] = tagId + +	childIfd := &MappedIfd{ +		ParentTagId: ptr.TagId, +		Path:        path, +		Placement:   placement, +		Name:        name, +		TagId:       tagId, +		Children:    make(map[uint16]*MappedIfd), +	} + +	if _, found := ptr.Children[tagId]; found == true { +		log.Panicf("child IFD with tag-ID (%04x) already registered under IFD [%s] with tag-ID (%04x)", tagId, ptr.Name, ptr.TagId) +	} + +	ptr.Children[tagId] = childIfd + +	return nil +} + +func (im *IfdMapping) dumpLineages(stack []*MappedIfd, input []string) (output []string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	currentIfd := stack[len(stack)-1] + +	output = input +	for _, childIfd := range currentIfd.Children { +		stackCopy := make([]*MappedIfd, len(stack)+1) + +		copy(stackCopy, stack) +		stackCopy[len(stack)] = childIfd + +		// Add to output, but don't include the obligatory root node. +		parts := make([]string, len(stackCopy)-1) +		for i, mi := range stackCopy[1:] { +			parts[i] = mi.Name +		} + +		output = append(output, strings.Join(parts, "/")) + +		output, err = im.dumpLineages(stackCopy, output) +		log.PanicIf(err) +	} + +	return output, nil +} + +// DumpLineages returns a slice of strings representing all mappings. +func (im *IfdMapping) DumpLineages() (output []string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	stack := []*MappedIfd{im.rootNode} +	output = make([]string, 0) + +	output, err = im.dumpLineages(stack, output) +	log.PanicIf(err) + +	return output, nil +} + +// LoadStandardIfds loads the standard IFDs into the mapping. 
+func LoadStandardIfds(im *IfdMapping) (err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	err = im.Add( +		[]uint16{}, +		IfdStandardIfdIdentity.TagId(), IfdStandardIfdIdentity.Name()) + +	log.PanicIf(err) + +	err = im.Add( +		[]uint16{IfdStandardIfdIdentity.TagId()}, +		IfdExifStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.Name()) + +	log.PanicIf(err) + +	err = im.Add( +		[]uint16{IfdStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.TagId()}, +		IfdExifIopStandardIfdIdentity.TagId(), IfdExifIopStandardIfdIdentity.Name()) + +	log.PanicIf(err) + +	err = im.Add( +		[]uint16{IfdStandardIfdIdentity.TagId()}, +		IfdGpsInfoStandardIfdIdentity.TagId(), IfdGpsInfoStandardIfdIdentity.Name()) + +	log.PanicIf(err) + +	return nil +} + +// IfdTag describes a single IFD tag and its parent (if any). +type IfdTag struct { +	parentIfdTag *IfdTag +	tagId        uint16 +	name         string +} + +func NewIfdTag(parentIfdTag *IfdTag, tagId uint16, name string) IfdTag { +	return IfdTag{ +		parentIfdTag: parentIfdTag, +		tagId:        tagId, +		name:         name, +	} +} + +// ParentIfd returns the IfdTag of this IFD's parent. +func (it IfdTag) ParentIfd() *IfdTag { +	return it.parentIfdTag +} + +// TagId returns the tag-ID of this IFD. +func (it IfdTag) TagId() uint16 { +	return it.tagId +} + +// Name returns the simple name of this IFD. +func (it IfdTag) Name() string { +	return it.name +} + +// String returns a descriptive string. +func (it IfdTag) String() string { +	parentIfdPhrase := "" +	if it.parentIfdTag != nil { +		parentIfdPhrase = fmt.Sprintf(" PARENT=(0x%04x)[%s]", it.parentIfdTag.tagId, it.parentIfdTag.name) +	} + +	return fmt.Sprintf("IfdTag<TAG-ID=(0x%04x) NAME=[%s]%s>", it.tagId, it.name, parentIfdPhrase) +} + +var ( +	// rootStandardIfd is the standard root IFD. +	rootStandardIfd = NewIfdTag(nil, 0x0000, "IFD") // IFD + +	// exifStandardIfd is the standard "Exif" IFD. +	exifStandardIfd = NewIfdTag(&rootStandardIfd, 0x8769, "Exif") // IFD/Exif + +	// iopStandardIfd is the standard "Iop" IFD. +	iopStandardIfd = NewIfdTag(&exifStandardIfd, 0xA005, "Iop") // IFD/Exif/Iop + +	// gpsInfoStandardIfd is the standard "GPS" IFD. +	gpsInfoStandardIfd = NewIfdTag(&rootStandardIfd, 0x8825, "GPSInfo") // IFD/GPSInfo +) + +// IfdIdentityPart represents one component in an IFD path. +type IfdIdentityPart struct { +	Name  string +	Index int +} + +// String returns a fully-qualified IFD path. +func (iip IfdIdentityPart) String() string { +	if iip.Index > 0 { +		return fmt.Sprintf("%s%d", iip.Name, iip.Index) +	} else { +		return iip.Name +	} +} + +// UnindexedString returned a non-fully-qualified IFD path. +func (iip IfdIdentityPart) UnindexedString() string { +	return iip.Name +} + +// IfdIdentity represents a single IFD path and provides access to various +// information and representations. +// +// Only global instances can be used for equality checks. +type IfdIdentity struct { +	ifdTag    IfdTag +	parts     []IfdIdentityPart +	ifdPath   string +	fqIfdPath string +} + +// NewIfdIdentity returns a new IfdIdentity struct. +func NewIfdIdentity(ifdTag IfdTag, parts ...IfdIdentityPart) (ii *IfdIdentity) { +	ii = &IfdIdentity{ +		ifdTag: ifdTag, +		parts:  parts, +	} + +	ii.ifdPath = ii.getIfdPath() +	ii.fqIfdPath = ii.getFqIfdPath() + +	return ii +} + +// NewIfdIdentityFromString parses a string like "IFD/Exif" or "IFD1" or +// something more exotic with custom IFDs ("SomeIFD4/SomeChildIFD6"). 
Note that +// this will valid the unindexed IFD structure (because the standard tags from +// the specification are unindexed), but not, obviously, any indices (e.g. +// the numbers in "IFD0", "IFD1", "SomeIFD4/SomeChildIFD6"). It is +// required for the caller to check whether these specific instances +// were actually parsed out of the stream. +func NewIfdIdentityFromString(im *IfdMapping, fqIfdPath string) (ii *IfdIdentity, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	lineage, err := im.ResolvePath(fqIfdPath) +	log.PanicIf(err) + +	var lastIt *IfdTag +	identityParts := make([]IfdIdentityPart, len(lineage)) +	for i, itii := range lineage { +		// Build out the tag that will eventually point to the IFD represented +		// by the right-most part in the IFD path. + +		it := &IfdTag{ +			parentIfdTag: lastIt, +			tagId:        itii.TagId, +			name:         itii.Name, +		} + +		lastIt = it + +		// Create the next IfdIdentity part. + +		iip := IfdIdentityPart{ +			Name:  itii.Name, +			Index: itii.Index, +		} + +		identityParts[i] = iip +	} + +	ii = NewIfdIdentity(*lastIt, identityParts...) +	return ii, nil +} + +func (ii *IfdIdentity) getFqIfdPath() string { +	partPhrases := make([]string, len(ii.parts)) +	for i, iip := range ii.parts { +		partPhrases[i] = iip.String() +	} + +	return strings.Join(partPhrases, "/") +} + +func (ii *IfdIdentity) getIfdPath() string { +	partPhrases := make([]string, len(ii.parts)) +	for i, iip := range ii.parts { +		partPhrases[i] = iip.UnindexedString() +	} + +	return strings.Join(partPhrases, "/") +} + +// String returns a fully-qualified IFD path. +func (ii *IfdIdentity) String() string { +	return ii.fqIfdPath +} + +// UnindexedString returns a non-fully-qualified IFD path. +func (ii *IfdIdentity) UnindexedString() string { +	return ii.ifdPath +} + +// IfdTag returns the tag struct behind this IFD. +func (ii *IfdIdentity) IfdTag() IfdTag { +	return ii.ifdTag +} + +// TagId returns the tag-ID of the IFD. +func (ii *IfdIdentity) TagId() uint16 { +	return ii.ifdTag.TagId() +} + +// LeafPathPart returns the last right-most path-part, which represents the +// current IFD. +func (ii *IfdIdentity) LeafPathPart() IfdIdentityPart { +	return ii.parts[len(ii.parts)-1] +} + +// Name returns the simple name of this IFD. +func (ii *IfdIdentity) Name() string { +	return ii.LeafPathPart().Name +} + +// Index returns the index of this IFD (more then one IFD under a parent IFD +// will be numbered [0..n]). +func (ii *IfdIdentity) Index() int { +	return ii.LeafPathPart().Index +} + +// Equals returns true if the two IfdIdentity instances are effectively +// identical. +// +// Since there's no way to get a specific fully-qualified IFD path without a +// certain slice of parts and all other fields are also derived from this, +// checking that the fully-qualified IFD path is equals is sufficient. +func (ii *IfdIdentity) Equals(ii2 *IfdIdentity) bool { +	return ii.String() == ii2.String() +} + +// NewChild creates an IfdIdentity for an IFD that is a child of the current +// IFD. +func (ii *IfdIdentity) NewChild(childIfdTag IfdTag, index int) (iiChild *IfdIdentity) { +	if *childIfdTag.parentIfdTag != ii.ifdTag { +		log.Panicf("can not add child; we are not the parent:\nUS=%v\nCHILD=%v", ii.ifdTag, childIfdTag) +	} + +	childPart := IfdIdentityPart{childIfdTag.name, index} +	childParts := append(ii.parts, childPart) + +	iiChild = NewIfdIdentity(childIfdTag, childParts...) 
+	return iiChild +} + +// NewSibling creates an IfdIdentity for an IFD that is a sibling to the current +// one. +func (ii *IfdIdentity) NewSibling(index int) (iiSibling *IfdIdentity) { +	parts := make([]IfdIdentityPart, len(ii.parts)) + +	copy(parts, ii.parts) +	parts[len(parts)-1].Index = index + +	iiSibling = NewIfdIdentity(ii.ifdTag, parts...) +	return iiSibling +} + +var ( +	// IfdStandardIfdIdentity represents the IFD path for IFD0. +	IfdStandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 0}) + +	// IfdExifStandardIfdIdentity represents the IFD path for IFD0/Exif0. +	IfdExifStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(exifStandardIfd, 0) + +	// IfdExifIopStandardIfdIdentity represents the IFD path for IFD0/Exif0/Iop0. +	IfdExifIopStandardIfdIdentity = IfdExifStandardIfdIdentity.NewChild(iopStandardIfd, 0) + +	// IfdGPSInfoStandardIfdIdentity represents the IFD path for IFD0/GPSInfo0. +	IfdGpsInfoStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(gpsInfoStandardIfd, 0) + +	// Ifd1StandardIfdIdentity represents the IFD path for IFD1. +	Ifd1StandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 1}) +) diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/parser.go b/vendor/github.com/dsoprea/go-exif/v3/common/parser.go new file mode 100644 index 000000000..76e8ef425 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/parser.go @@ -0,0 +1,280 @@ +package exifcommon + +import ( +	"bytes" +	"errors" +	"math" + +	"encoding/binary" + +	"github.com/dsoprea/go-logging" +) + +var ( +	parserLogger = log.NewLogger("exifcommon.parser") +) + +var ( +	ErrParseFail = errors.New("parse failure") +) + +// Parser knows how to parse all well-defined, encoded EXIF types. +type Parser struct { +} + +// ParseBytesknows how to parse a byte-type value. +func (p *Parser) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeByte.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	value = []uint8(data[:count]) + +	return value, nil +} + +// ParseAscii returns a string and auto-strips the trailing NUL character that +// should be at the end of the encoding. +func (p *Parser) ParseAscii(data []byte, unitCount uint32) (value string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeAscii.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	if len(data) == 0 || data[count-1] != 0 { +		s := string(data[:count]) +		parserLogger.Warningf(nil, "ASCII not terminated with NUL as expected: [%v]", s) + +		for i, c := range s { +			if c > 127 { +				// Binary + +				t := s[:i] +				parserLogger.Warningf(nil, "ASCII also had binary characters. Truncating: [%v]->[%s]", s, t) + +				return t, nil +			} +		} + +		return s, nil +	} + +	// Auto-strip the NUL from the end. It serves no purpose outside of +	// encoding semantics. + +	return string(data[:count-1]), nil +} + +// ParseAsciiNoNul returns a string without any consideration for a trailing NUL +// character. 
+func (p *Parser) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeAscii.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	return string(data[:count]), nil +} + +// ParseShorts knows how to parse an encoded list of shorts. +func (p *Parser) ParseShorts(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint16, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeShort.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	value = make([]uint16, count) +	for i := 0; i < count; i++ { +		value[i] = byteOrder.Uint16(data[i*2:]) +	} + +	return value, nil +} + +// ParseLongs knows how to encode an encoded list of unsigned longs. +func (p *Parser) ParseLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint32, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeLong.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	value = make([]uint32, count) +	for i := 0; i < count; i++ { +		value[i] = byteOrder.Uint32(data[i*4:]) +	} + +	return value, nil +} + +// ParseFloats knows how to encode an encoded list of floats. +func (p *Parser) ParseFloats(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float32, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	count := int(unitCount) + +	if len(data) != (TypeFloat.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	value = make([]float32, count) +	for i := 0; i < count; i++ { +		value[i] = math.Float32frombits(byteOrder.Uint32(data[i*4 : (i+1)*4])) +	} + +	return value, nil +} + +// ParseDoubles knows how to encode an encoded list of doubles. +func (p *Parser) ParseDoubles(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float64, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	count := int(unitCount) + +	if len(data) != (TypeDouble.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	value = make([]float64, count) +	for i := 0; i < count; i++ { +		value[i] = math.Float64frombits(byteOrder.Uint64(data[i*8 : (i+1)*8])) +	} + +	return value, nil +} + +// ParseRationals knows how to parse an encoded list of unsigned rationals. +func (p *Parser) ParseRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []Rational, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeRational.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	value = make([]Rational, count) +	for i := 0; i < count; i++ { +		value[i].Numerator = byteOrder.Uint32(data[i*8:]) +		value[i].Denominator = byteOrder.Uint32(data[i*8+4:]) +	} + +	return value, nil +} + +// ParseSignedLongs knows how to parse an encoded list of signed longs. 
+func (p *Parser) ParseSignedLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []int32, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeSignedLong.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	b := bytes.NewBuffer(data) + +	value = make([]int32, count) +	for i := 0; i < count; i++ { +		err := binary.Read(b, byteOrder, &value[i]) +		log.PanicIf(err) +	} + +	return value, nil +} + +// ParseSignedRationals knows how to parse an encoded list of signed +// rationals. +func (p *Parser) ParseSignedRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []SignedRational, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): Add test + +	count := int(unitCount) + +	if len(data) < (TypeSignedRational.Size() * count) { +		log.Panic(ErrNotEnoughData) +	} + +	b := bytes.NewBuffer(data) + +	value = make([]SignedRational, count) +	for i := 0; i < count; i++ { +		err = binary.Read(b, byteOrder, &value[i].Numerator) +		log.PanicIf(err) + +		err = binary.Read(b, byteOrder, &value[i].Denominator) +		log.PanicIf(err) +	} + +	return value, nil +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go b/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go new file mode 100644 index 000000000..f04fa22b6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go @@ -0,0 +1,88 @@ +package exifcommon + +import ( +	"os" +	"path" + +	"encoding/binary" +	"io/ioutil" + +	"github.com/dsoprea/go-logging" +) + +var ( +	moduleRootPath = "" + +	testExifData []byte = nil + +	// EncodeDefaultByteOrder is the default byte-order for encoding operations. +	EncodeDefaultByteOrder = binary.BigEndian + +	// Default byte order for tests. 
+	TestDefaultByteOrder = binary.BigEndian +) + +func GetModuleRootPath() string { +	if moduleRootPath == "" { +		moduleRootPath = os.Getenv("EXIF_MODULE_ROOT_PATH") +		if moduleRootPath != "" { +			return moduleRootPath +		} + +		currentWd, err := os.Getwd() +		log.PanicIf(err) + +		currentPath := currentWd + +		visited := make([]string, 0) + +		for { +			tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") + +			_, err := os.Stat(tryStampFilepath) +			if err != nil && os.IsNotExist(err) != true { +				log.Panic(err) +			} else if err == nil { +				break +			} + +			visited = append(visited, tryStampFilepath) + +			currentPath = path.Dir(currentPath) +			if currentPath == "/" { +				log.Panicf("could not find module-root: %v", visited) +			} +		} + +		moduleRootPath = currentPath +	} + +	return moduleRootPath +} + +func GetTestAssetsPath() string { +	moduleRootPath := GetModuleRootPath() +	assetsPath := path.Join(moduleRootPath, "assets") + +	return assetsPath +} + +func getTestImageFilepath() string { +	assetsPath := GetTestAssetsPath() +	testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg") +	return testImageFilepath +} + +func getTestExifData() []byte { +	if testExifData == nil { +		assetsPath := GetTestAssetsPath() +		filepath := path.Join(assetsPath, "NDM_8901.jpg.exif") + +		var err error + +		testExifData, err = ioutil.ReadFile(filepath) +		log.PanicIf(err) +	} + +	return testExifData +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/type.go b/vendor/github.com/dsoprea/go-exif/v3/common/type.go new file mode 100644 index 000000000..e79bcb9a1 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/type.go @@ -0,0 +1,482 @@ +package exifcommon + +import ( +	"errors" +	"fmt" +	"reflect" +	"strconv" +	"strings" +	"unicode" + +	"encoding/binary" + +	"github.com/dsoprea/go-logging" +) + +var ( +	typeLogger = log.NewLogger("exif.type") +) + +var ( +	// ErrNotEnoughData is used when there isn't enough data to accommodate what +	// we're trying to parse (sizeof(type) * unit_count). +	ErrNotEnoughData = errors.New("not enough data for type") + +	// ErrWrongType is used when we try to parse anything other than the +	// current type. +	ErrWrongType = errors.New("wrong type, can not parse") + +	// ErrUnhandledUndefinedTypedTag is used when we try to parse a tag that's +	// recorded as an "unknown" type but not a documented tag (therefore +	// leaving us not knowning how to read it). +	ErrUnhandledUndefinedTypedTag = errors.New("not a standard unknown-typed tag") +) + +// TagTypePrimitive is a type-alias that let's us easily lookup type properties. +type TagTypePrimitive uint16 + +const ( +	// TypeByte describes an encoded list of bytes. +	TypeByte TagTypePrimitive = 1 + +	// TypeAscii describes an encoded list of characters that is terminated +	// with a NUL in its encoded form. +	TypeAscii TagTypePrimitive = 2 + +	// TypeShort describes an encoded list of shorts. +	TypeShort TagTypePrimitive = 3 + +	// TypeLong describes an encoded list of longs. +	TypeLong TagTypePrimitive = 4 + +	// TypeRational describes an encoded list of rationals. +	TypeRational TagTypePrimitive = 5 + +	// TypeUndefined describes an encoded value that has a complex/non-clearcut +	// interpretation. +	TypeUndefined TagTypePrimitive = 7 + +	// We've seen type-8, but have no documentation on it. + +	// TypeSignedLong describes an encoded list of signed longs. +	TypeSignedLong TagTypePrimitive = 9 + +	// TypeSignedRational describes an encoded list of signed rationals. 
+	TypeSignedRational TagTypePrimitive = 10 + +	// TypeFloat describes an encoded list of floats +	TypeFloat TagTypePrimitive = 11 + +	// TypeDouble describes an encoded list of doubles. +	TypeDouble TagTypePrimitive = 12 + +	// TypeAsciiNoNul is just a pseudo-type, for our own purposes. +	TypeAsciiNoNul TagTypePrimitive = 0xf0 +) + +// String returns the name of the type +func (typeType TagTypePrimitive) String() string { +	return TypeNames[typeType] +} + +// Size returns the size of one atomic unit of the type. +func (tagType TagTypePrimitive) Size() int { +	switch tagType { +	case TypeByte, TypeAscii, TypeAsciiNoNul: +		return 1 +	case TypeShort: +		return 2 +	case TypeLong, TypeSignedLong, TypeFloat: +		return 4 +	case TypeRational, TypeSignedRational, TypeDouble: +		return 8 +	default: +		log.Panicf("can not determine tag-value size for type (%d): [%s]", +			tagType, +			TypeNames[tagType]) +		// Never called. +		return 0 +	} +} + +// IsValid returns true if tagType is a valid type. +func (tagType TagTypePrimitive) IsValid() bool { + +	// TODO(dustin): Add test + +	return tagType == TypeByte || +		tagType == TypeAscii || +		tagType == TypeAsciiNoNul || +		tagType == TypeShort || +		tagType == TypeLong || +		tagType == TypeRational || +		tagType == TypeSignedLong || +		tagType == TypeSignedRational || +		tagType == TypeFloat || +		tagType == TypeDouble || +		tagType == TypeUndefined +} + +var ( +	// TODO(dustin): Rename TypeNames() to typeNames() and add getter. +	TypeNames = map[TagTypePrimitive]string{ +		TypeByte:           "BYTE", +		TypeAscii:          "ASCII", +		TypeShort:          "SHORT", +		TypeLong:           "LONG", +		TypeRational:       "RATIONAL", +		TypeUndefined:      "UNDEFINED", +		TypeSignedLong:     "SLONG", +		TypeSignedRational: "SRATIONAL", +		TypeFloat:          "FLOAT", +		TypeDouble:         "DOUBLE", + +		TypeAsciiNoNul: "_ASCII_NO_NUL", +	} + +	typeNamesR = map[string]TagTypePrimitive{} +) + +// Rational describes an unsigned rational value. +type Rational struct { +	// Numerator is the numerator of the rational value. +	Numerator uint32 + +	// Denominator is the numerator of the rational value. +	Denominator uint32 +} + +// SignedRational describes a signed rational value. +type SignedRational struct { +	// Numerator is the numerator of the rational value. +	Numerator int32 + +	// Denominator is the numerator of the rational value. +	Denominator int32 +} + +func isPrintableText(s string) bool { +	for _, c := range s { +		// unicode.IsPrint() returns false for newline characters. +		if c == 0x0d || c == 0x0a { +			continue +		} else if unicode.IsPrint(rune(c)) == false { +			return false +		} +	} + +	return true +} + +// Format returns a stringified value for the given encoding. Automatically +// parses. Automatically calculates count based on type size. This function +// also supports undefined-type values (the ones that we support, anyway) by +// way of the String() method that they all require. We can't be more specific +// because we're a base package and we can't refer to it. +func FormatFromType(value interface{}, justFirst bool) (phrase string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): !! 
Add test + +	switch t := value.(type) { +	case []byte: +		return DumpBytesToString(t), nil +	case string: +		for i, c := range t { +			if c == 0 { +				t = t[:i] +				break +			} +		} + +		if isPrintableText(t) == false { +			phrase = fmt.Sprintf("string with binary data (%d bytes)", len(t)) +			return phrase, nil +		} + +		return t, nil +	case []uint16, []uint32, []int32, []float64, []float32: +		val := reflect.ValueOf(t) + +		if val.Len() == 0 { +			return "", nil +		} + +		if justFirst == true { +			var valueSuffix string +			if val.Len() > 1 { +				valueSuffix = "..." +			} + +			return fmt.Sprintf("%v%s", val.Index(0), valueSuffix), nil +		} + +		return fmt.Sprintf("%v", val), nil +	case []Rational: +		if len(t) == 0 { +			return "", nil +		} + +		parts := make([]string, len(t)) +		for i, r := range t { +			parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator) + +			if justFirst == true { +				break +			} +		} + +		if justFirst == true { +			var valueSuffix string +			if len(t) > 1 { +				valueSuffix = "..." +			} + +			return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil +		} + +		return fmt.Sprintf("%v", parts), nil +	case []SignedRational: +		if len(t) == 0 { +			return "", nil +		} + +		parts := make([]string, len(t)) +		for i, r := range t { +			parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator) + +			if justFirst == true { +				break +			} +		} + +		if justFirst == true { +			var valueSuffix string +			if len(t) > 1 { +				valueSuffix = "..." +			} + +			return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil +		} + +		return fmt.Sprintf("%v", parts), nil +	case fmt.Stringer: +		s := t.String() +		if isPrintableText(s) == false { +			phrase = fmt.Sprintf("stringable with binary data (%d bytes)", len(s)) +			return phrase, nil +		} + +		// An undefined value that is documented (or that we otherwise support). +		return s, nil +	default: +		// Affects only "unknown" values, in general. +		log.Panicf("type can not be formatted into string: %v", reflect.TypeOf(value).Name()) + +		// Never called. +		return "", nil +	} +} + +// Format returns a stringified value for the given encoding. Automatically +// parses. Automatically calculates count based on type size. +func FormatFromBytes(rawBytes []byte, tagType TagTypePrimitive, justFirst bool, byteOrder binary.ByteOrder) (phrase string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	// TODO(dustin): !! Add test + +	typeSize := tagType.Size() + +	if len(rawBytes)%typeSize != 0 { +		log.Panicf("byte-count (%d) does not align for [%s] type with a size of (%d) bytes", len(rawBytes), TypeNames[tagType], typeSize) +	} + +	// unitCount is the calculated unit-count. This should equal the original +	// value from the tag (pre-resolution). +	unitCount := uint32(len(rawBytes) / typeSize) + +	// Truncate the items if it's not bytes or a string and we just want the first. 
+ +	var value interface{} + +	switch tagType { +	case TypeByte: +		var err error + +		value, err = parser.ParseBytes(rawBytes, unitCount) +		log.PanicIf(err) +	case TypeAscii: +		var err error + +		value, err = parser.ParseAscii(rawBytes, unitCount) +		log.PanicIf(err) +	case TypeAsciiNoNul: +		var err error + +		value, err = parser.ParseAsciiNoNul(rawBytes, unitCount) +		log.PanicIf(err) +	case TypeShort: +		var err error + +		value, err = parser.ParseShorts(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	case TypeLong: +		var err error + +		value, err = parser.ParseLongs(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	case TypeFloat: +		var err error + +		value, err = parser.ParseFloats(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	case TypeDouble: +		var err error + +		value, err = parser.ParseDoubles(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	case TypeRational: +		var err error + +		value, err = parser.ParseRationals(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	case TypeSignedLong: +		var err error + +		value, err = parser.ParseSignedLongs(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	case TypeSignedRational: +		var err error + +		value, err = parser.ParseSignedRationals(rawBytes, unitCount, byteOrder) +		log.PanicIf(err) +	default: +		// Affects only "unknown" values, in general. +		log.Panicf("value of type [%s] can not be formatted into string", tagType.String()) + +		// Never called. +		return "", nil +	} + +	phrase, err = FormatFromType(value, justFirst) +	log.PanicIf(err) + +	return phrase, nil +} + +// TranslateStringToType converts user-provided strings to properly-typed +// values. If a string, returns a string. Else, assumes that it's a single +// number. If a list needs to be processed, it is the caller's responsibility to +// split it (according to whichever convention has been established). +func TranslateStringToType(tagType TagTypePrimitive, valueString string) (value interface{}, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	if tagType == TypeUndefined { +		// The caller should just call String() on the decoded type. +		log.Panicf("undefined-type values are not supported") +	} + +	if tagType == TypeByte { +		wide, err := strconv.ParseInt(valueString, 16, 8) +		log.PanicIf(err) + +		return byte(wide), nil +	} else if tagType == TypeAscii || tagType == TypeAsciiNoNul { +		// Whether or not we're putting an NUL on the end is only relevant for +		// byte-level encoding. This function really just supports a user +		// interface. 
+ +		return valueString, nil +	} else if tagType == TypeShort { +		n, err := strconv.ParseUint(valueString, 10, 16) +		log.PanicIf(err) + +		return uint16(n), nil +	} else if tagType == TypeLong { +		n, err := strconv.ParseUint(valueString, 10, 32) +		log.PanicIf(err) + +		return uint32(n), nil +	} else if tagType == TypeRational { +		parts := strings.SplitN(valueString, "/", 2) + +		numerator, err := strconv.ParseUint(parts[0], 10, 32) +		log.PanicIf(err) + +		denominator, err := strconv.ParseUint(parts[1], 10, 32) +		log.PanicIf(err) + +		return Rational{ +			Numerator:   uint32(numerator), +			Denominator: uint32(denominator), +		}, nil +	} else if tagType == TypeSignedLong { +		n, err := strconv.ParseInt(valueString, 10, 32) +		log.PanicIf(err) + +		return int32(n), nil +	} else if tagType == TypeFloat { +		n, err := strconv.ParseFloat(valueString, 32) +		log.PanicIf(err) + +		return float32(n), nil +	} else if tagType == TypeDouble { +		n, err := strconv.ParseFloat(valueString, 64) +		log.PanicIf(err) + +		return float64(n), nil +	} else if tagType == TypeSignedRational { +		parts := strings.SplitN(valueString, "/", 2) + +		numerator, err := strconv.ParseInt(parts[0], 10, 32) +		log.PanicIf(err) + +		denominator, err := strconv.ParseInt(parts[1], 10, 32) +		log.PanicIf(err) + +		return SignedRational{ +			Numerator:   int32(numerator), +			Denominator: int32(denominator), +		}, nil +	} + +	log.Panicf("from-string encoding for type not supported; this shouldn't happen: [%s]", tagType.String()) +	return nil, nil +} + +// GetTypeByName returns the `TagTypePrimitive` for the given type name. +// Returns (0) if not valid. +func GetTypeByName(typeName string) (tagType TagTypePrimitive, found bool) { +	tagType, found = typeNamesR[typeName] +	return tagType, found +} + +// BasicTag describes a single tag for any purpose. +type BasicTag struct { +	// FqIfdPath is the fully-qualified IFD-path. +	FqIfdPath string + +	// IfdPath is the unindexed IFD-path. +	IfdPath string + +	// TagId is the tag-ID. +	TagId uint16 +} + +func init() { +	for typeId, typeName := range TypeNames { +		typeNamesR[typeName] = typeId +	} +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/utility.go b/vendor/github.com/dsoprea/go-exif/v3/common/utility.go new file mode 100644 index 000000000..575049706 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/utility.go @@ -0,0 +1,148 @@ +package exifcommon + +import ( +	"bytes" +	"fmt" +	"reflect" +	"strconv" +	"strings" +	"time" + +	"github.com/dsoprea/go-logging" +) + +var ( +	timeType = reflect.TypeOf(time.Time{}) +) + +// DumpBytes prints a list of hex-encoded bytes. +func DumpBytes(data []byte) { +	fmt.Printf("DUMP: ") +	for _, x := range data { +		fmt.Printf("%02x ", x) +	} + +	fmt.Printf("\n") +} + +// DumpBytesClause prints a list like DumpBytes(), but encapsulated in +// "[]byte { ... }". +func DumpBytesClause(data []byte) { +	fmt.Printf("DUMP: ") + +	fmt.Printf("[]byte { ") + +	for i, x := range data { +		fmt.Printf("0x%02x", x) + +		if i < len(data)-1 { +			fmt.Printf(", ") +		} +	} + +	fmt.Printf(" }\n") +} + +// DumpBytesToString returns a stringified list of hex-encoded bytes. +func DumpBytesToString(data []byte) string { +	b := new(bytes.Buffer) + +	for i, x := range data { +		_, err := b.WriteString(fmt.Sprintf("%02x", x)) +		log.PanicIf(err) + +		if i < len(data)-1 { +			_, err := b.WriteRune(' ') +			log.PanicIf(err) +		} +	} + +	return b.String() +} + +// DumpBytesClauseToString returns a comma-separated list of hex-encoded bytes. 
+func DumpBytesClauseToString(data []byte) string { +	b := new(bytes.Buffer) + +	for i, x := range data { +		_, err := b.WriteString(fmt.Sprintf("0x%02x", x)) +		log.PanicIf(err) + +		if i < len(data)-1 { +			_, err := b.WriteString(", ") +			log.PanicIf(err) +		} +	} + +	return b.String() +} + +// ExifFullTimestampString produces a string like "2018:11:30 13:01:49" from a +// `time.Time` struct. It will attempt to convert to UTC first. +func ExifFullTimestampString(t time.Time) (fullTimestampPhrase string) { +	t = t.UTC() + +	return fmt.Sprintf("%04d:%02d:%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) +} + +// ParseExifFullTimestamp parses dates like "2018:11:30 13:01:49" into a UTC +// `time.Time` struct. +func ParseExifFullTimestamp(fullTimestampPhrase string) (timestamp time.Time, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	parts := strings.Split(fullTimestampPhrase, " ") +	datestampValue, timestampValue := parts[0], parts[1] + +	// Normalize the separators. +	datestampValue = strings.ReplaceAll(datestampValue, "-", ":") +	timestampValue = strings.ReplaceAll(timestampValue, "-", ":") + +	dateParts := strings.Split(datestampValue, ":") + +	year, err := strconv.ParseUint(dateParts[0], 10, 16) +	if err != nil { +		log.Panicf("could not parse year") +	} + +	month, err := strconv.ParseUint(dateParts[1], 10, 8) +	if err != nil { +		log.Panicf("could not parse month") +	} + +	day, err := strconv.ParseUint(dateParts[2], 10, 8) +	if err != nil { +		log.Panicf("could not parse day") +	} + +	timeParts := strings.Split(timestampValue, ":") + +	hour, err := strconv.ParseUint(timeParts[0], 10, 8) +	if err != nil { +		log.Panicf("could not parse hour") +	} + +	minute, err := strconv.ParseUint(timeParts[1], 10, 8) +	if err != nil { +		log.Panicf("could not parse minute") +	} + +	second, err := strconv.ParseUint(timeParts[2], 10, 8) +	if err != nil { +		log.Panicf("could not parse second") +	} + +	timestamp = time.Date(int(year), time.Month(month), int(day), int(hour), int(minute), int(second), 0, time.UTC) +	return timestamp, nil +} + +// IsTime returns true if the value is a `time.Time`. +func IsTime(v interface{}) bool { + +	// TODO(dustin): Add test + +	return reflect.TypeOf(v) == timeType +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go b/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go new file mode 100644 index 000000000..b9e634106 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go @@ -0,0 +1,464 @@ +package exifcommon + +import ( +	"errors" +	"io" + +	"encoding/binary" + +	"github.com/dsoprea/go-logging" +) + +var ( +	parser *Parser +) + +var ( +	// ErrNotFarValue indicates that an offset-based lookup was attempted for a +	// non-offset-based (embedded) value. +	ErrNotFarValue = errors.New("not a far value") +) + +// ValueContext embeds all of the parameters required to find and extract the +// actual tag value. +type ValueContext struct { +	unitCount      uint32 +	valueOffset    uint32 +	rawValueOffset []byte +	rs             io.ReadSeeker + +	tagType   TagTypePrimitive +	byteOrder binary.ByteOrder + +	// undefinedValueTagType is the effective type to use if this is an +	// "undefined" value. +	undefinedValueTagType TagTypePrimitive + +	ifdPath string +	tagId   uint16 +} + +// TODO(dustin): We can update newValueContext() to derive `valueOffset` itself (from `rawValueOffset`). 
+ +// NewValueContext returns a new ValueContext struct. +func NewValueContext(ifdPath string, tagId uint16, unitCount, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, tagType TagTypePrimitive, byteOrder binary.ByteOrder) *ValueContext { +	return &ValueContext{ +		unitCount:      unitCount, +		valueOffset:    valueOffset, +		rawValueOffset: rawValueOffset, +		rs:             rs, + +		tagType:   tagType, +		byteOrder: byteOrder, + +		ifdPath: ifdPath, +		tagId:   tagId, +	} +} + +// SetUndefinedValueType sets the effective type if this is an unknown-type tag. +func (vc *ValueContext) SetUndefinedValueType(tagType TagTypePrimitive) { +	if vc.tagType != TypeUndefined { +		log.Panicf("can not set effective type for unknown-type tag because this is *not* an unknown-type tag") +	} + +	vc.undefinedValueTagType = tagType +} + +// UnitCount returns the embedded unit-count. +func (vc *ValueContext) UnitCount() uint32 { +	return vc.unitCount +} + +// ValueOffset returns the value-offset decoded as a `uint32`. +func (vc *ValueContext) ValueOffset() uint32 { +	return vc.valueOffset +} + +// RawValueOffset returns the uninterpreted value-offset. This is used for +// embedded values (values small enough to fit within the offset bytes rather +// than needing to be stored elsewhere and referred to by an actual offset). +func (vc *ValueContext) RawValueOffset() []byte { +	return vc.rawValueOffset +} + +// AddressableData returns the block of data that we can dereference into. +func (vc *ValueContext) AddressableData() io.ReadSeeker { + +	// RELEASE)dustin): Rename from AddressableData() to ReadSeeker() + +	return vc.rs +} + +// ByteOrder returns the byte-order of numbers. +func (vc *ValueContext) ByteOrder() binary.ByteOrder { +	return vc.byteOrder +} + +// IfdPath returns the path of the IFD containing this tag. +func (vc *ValueContext) IfdPath() string { +	return vc.ifdPath +} + +// TagId returns the ID of the tag that we represent. +func (vc *ValueContext) TagId() uint16 { +	return vc.tagId +} + +// isEmbedded returns whether the value is embedded or a reference. This can't +// be precalculated since the size is not defined for all types (namely the +// "undefined" types). +func (vc *ValueContext) isEmbedded() bool { +	tagType := vc.effectiveValueType() + +	return (tagType.Size() * int(vc.unitCount)) <= 4 +} + +// SizeInBytes returns the number of bytes that this value requires. The +// underlying call will panic if the type is UNDEFINED. It is the +// responsibility of the caller to preemptively check that. +func (vc *ValueContext) SizeInBytes() int { +	tagType := vc.effectiveValueType() + +	return tagType.Size() * int(vc.unitCount) +} + +// effectiveValueType returns the effective type of the unknown-type tag or, if +// not unknown, the actual type. +func (vc *ValueContext) effectiveValueType() (tagType TagTypePrimitive) { +	if vc.tagType == TypeUndefined { +		tagType = vc.undefinedValueTagType + +		if tagType == 0 { +			log.Panicf("undefined-value type not set") +		} +	} else { +		tagType = vc.tagType +	} + +	return tagType +} + +// readRawEncoded returns the encoded bytes for the value that we represent. 
+func (vc *ValueContext) readRawEncoded() (rawBytes []byte, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	tagType := vc.effectiveValueType() + +	unitSizeRaw := uint32(tagType.Size()) + +	if vc.isEmbedded() == true { +		byteLength := unitSizeRaw * vc.unitCount +		return vc.rawValueOffset[:byteLength], nil +	} + +	_, err = vc.rs.Seek(int64(vc.valueOffset), io.SeekStart) +	log.PanicIf(err) + +	rawBytes = make([]byte, vc.unitCount*unitSizeRaw) + +	_, err = io.ReadFull(vc.rs, rawBytes) +	log.PanicIf(err) + +	return rawBytes, nil +} + +// GetFarOffset returns the offset if the value is not embedded [within the +// pointer itself] or an error if an embedded value. +func (vc *ValueContext) GetFarOffset() (offset uint32, err error) { +	if vc.isEmbedded() == true { +		return 0, ErrNotFarValue +	} + +	return vc.valueOffset, nil +} + +// ReadRawEncoded returns the encoded bytes for the value that we represent. +func (vc *ValueContext) ReadRawEncoded() (rawBytes []byte, err error) { + +	// TODO(dustin): Remove this method and rename readRawEncoded in its place. + +	return vc.readRawEncoded() +} + +// Format returns a string representation for the value. +// +// Where the type is not ASCII, `justFirst` indicates whether to just stringify +// the first item in the slice (or return an empty string if the slice is +// empty). +// +// Since this method lacks the information to process undefined-type tags (e.g. +// byte-order, tag-ID, IFD type), it will return an error if attempted. See +// `Undefined()`. +func (vc *ValueContext) Format() (value string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawBytes, err := vc.readRawEncoded() +	log.PanicIf(err) + +	phrase, err := FormatFromBytes(rawBytes, vc.effectiveValueType(), false, vc.byteOrder) +	log.PanicIf(err) + +	return phrase, nil +} + +// FormatFirst is similar to `Format` but only gets and stringifies the first +// item. +func (vc *ValueContext) FormatFirst() (value string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawBytes, err := vc.readRawEncoded() +	log.PanicIf(err) + +	phrase, err := FormatFromBytes(rawBytes, vc.tagType, true, vc.byteOrder) +	log.PanicIf(err) + +	return phrase, nil +} + +// ReadBytes parses the encoded byte-array from the value-context. +func (vc *ValueContext) ReadBytes() (value []byte, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseBytes(rawValue, vc.unitCount) +	log.PanicIf(err) + +	return value, nil +} + +// ReadAscii parses the encoded NUL-terminated ASCII string from the value- +// context. +func (vc *ValueContext) ReadAscii() (value string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseAscii(rawValue, vc.unitCount) +	log.PanicIf(err) + +	return value, nil +} + +// ReadAsciiNoNul parses the non-NUL-terminated encoded ASCII string from the +// value-context. 
+func (vc *ValueContext) ReadAsciiNoNul() (value string, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseAsciiNoNul(rawValue, vc.unitCount) +	log.PanicIf(err) + +	return value, nil +} + +// ReadShorts parses the list of encoded shorts from the value-context. +func (vc *ValueContext) ReadShorts() (value []uint16, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseShorts(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// ReadLongs parses the list of encoded, unsigned longs from the value-context. +func (vc *ValueContext) ReadLongs() (value []uint32, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseLongs(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// ReadFloats parses the list of encoded, floats from the value-context. +func (vc *ValueContext) ReadFloats() (value []float32, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseFloats(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// ReadDoubles parses the list of encoded, doubles from the value-context. +func (vc *ValueContext) ReadDoubles() (value []float64, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseDoubles(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// ReadRationals parses the list of encoded, unsigned rationals from the value- +// context. +func (vc *ValueContext) ReadRationals() (value []Rational, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseRationals(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// ReadSignedLongs parses the list of encoded, signed longs from the value-context. +func (vc *ValueContext) ReadSignedLongs() (value []int32, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseSignedLongs(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// ReadSignedRationals parses the list of encoded, signed rationals from the +// value-context. +func (vc *ValueContext) ReadSignedRationals() (value []SignedRational, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	rawValue, err := vc.readRawEncoded() +	log.PanicIf(err) + +	value, err = parser.ParseSignedRationals(rawValue, vc.unitCount, vc.byteOrder) +	log.PanicIf(err) + +	return value, nil +} + +// Values knows how to resolve the given value. 
This value is always a list +// (undefined-values aside), so we're named accordingly. +// +// Since this method lacks the information to process unknown-type tags (e.g. +// byte-order, tag-ID, IFD type), it will return an error if attempted. See +// `Undefined()`. +func (vc *ValueContext) Values() (values interface{}, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	if vc.tagType == TypeByte { +		values, err = vc.ReadBytes() +		log.PanicIf(err) +	} else if vc.tagType == TypeAscii { +		values, err = vc.ReadAscii() +		log.PanicIf(err) +	} else if vc.tagType == TypeAsciiNoNul { +		values, err = vc.ReadAsciiNoNul() +		log.PanicIf(err) +	} else if vc.tagType == TypeShort { +		values, err = vc.ReadShorts() +		log.PanicIf(err) +	} else if vc.tagType == TypeLong { +		values, err = vc.ReadLongs() +		log.PanicIf(err) +	} else if vc.tagType == TypeRational { +		values, err = vc.ReadRationals() +		log.PanicIf(err) +	} else if vc.tagType == TypeSignedLong { +		values, err = vc.ReadSignedLongs() +		log.PanicIf(err) +	} else if vc.tagType == TypeSignedRational { +		values, err = vc.ReadSignedRationals() +		log.PanicIf(err) +	} else if vc.tagType == TypeFloat { +		values, err = vc.ReadFloats() +		log.PanicIf(err) +	} else if vc.tagType == TypeDouble { +		values, err = vc.ReadDoubles() +		log.PanicIf(err) +	} else if vc.tagType == TypeUndefined { +		log.Panicf("will not parse undefined-type value") + +		// Never called. +		return nil, nil +	} else { +		log.Panicf("value of type [%s] is unparseable", vc.tagType) +		// Never called. +		return nil, nil +	} + +	return values, nil +} + +func init() { +	parser = new(Parser) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go b/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go new file mode 100644 index 000000000..2cd26cc7b --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go @@ -0,0 +1,273 @@ +package exifcommon + +import ( +	"bytes" +	"math" +	"reflect" +	"time" + +	"encoding/binary" + +	"github.com/dsoprea/go-logging" +) + +var ( +	typeEncodeLogger = log.NewLogger("exif.type_encode") +) + +// EncodedData encapsulates the compound output of an encoding operation. +type EncodedData struct { +	Type    TagTypePrimitive +	Encoded []byte + +	// TODO(dustin): Is this really necessary? We might have this just to correlate to the incoming stream format (raw bytes and a unit-count both for incoming and outgoing). +	UnitCount uint32 +} + +// ValueEncoder knows how to encode values of every type to bytes. +type ValueEncoder struct { +	byteOrder binary.ByteOrder +} + +// NewValueEncoder returns a new ValueEncoder. +func NewValueEncoder(byteOrder binary.ByteOrder) *ValueEncoder { +	return &ValueEncoder{ +		byteOrder: byteOrder, +	} +} + +func (ve *ValueEncoder) encodeBytes(value []uint8) (ed EncodedData, err error) { +	ed.Type = TypeByte +	ed.Encoded = []byte(value) +	ed.UnitCount = uint32(len(value)) + +	return ed, nil +} + +func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) { +	ed.Type = TypeAscii + +	ed.Encoded = []byte(value) +	ed.Encoded = append(ed.Encoded, 0) + +	ed.UnitCount = uint32(len(ed.Encoded)) + +	return ed, nil +} + +// encodeAsciiNoNul returns a string encoded as a byte-string without a trailing +// NUL byte. +// +// Note that: +// +// 1. This type can not be automatically encoded using `Encode()`. The default +//    mode is to encode *with* a trailing NUL byte using `encodeAscii`. 
Only +//    certain undefined-type tags using an unterminated ASCII string and these +//    are exceptional in nature. +// +// 2. The presence of this method allows us to completely test the complimentary +//    no-nul parser. +// +func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) { +	ed.Type = TypeAsciiNoNul +	ed.Encoded = []byte(value) +	ed.UnitCount = uint32(len(ed.Encoded)) + +	return ed, nil +} + +func (ve *ValueEncoder) encodeShorts(value []uint16) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) +	ed.Encoded = make([]byte, ed.UnitCount*2) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		ve.byteOrder.PutUint16(ed.Encoded[i*2:(i+1)*2], value[i]) +	} + +	ed.Type = TypeShort + +	return ed, nil +} + +func (ve *ValueEncoder) encodeLongs(value []uint32) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) +	ed.Encoded = make([]byte, ed.UnitCount*4) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], value[i]) +	} + +	ed.Type = TypeLong + +	return ed, nil +} + +func (ve *ValueEncoder) encodeFloats(value []float32) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) +	ed.Encoded = make([]byte, ed.UnitCount*4) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], math.Float32bits(value[i])) +	} + +	ed.Type = TypeFloat + +	return ed, nil +} + +func (ve *ValueEncoder) encodeDoubles(value []float64) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) +	ed.Encoded = make([]byte, ed.UnitCount*8) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		ve.byteOrder.PutUint64(ed.Encoded[i*8:(i+1)*8], math.Float64bits(value[i])) +	} + +	ed.Type = TypeDouble + +	return ed, nil +} + +func (ve *ValueEncoder) encodeRationals(value []Rational) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) +	ed.Encoded = make([]byte, ed.UnitCount*8) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		ve.byteOrder.PutUint32(ed.Encoded[i*8+0:i*8+4], value[i].Numerator) +		ve.byteOrder.PutUint32(ed.Encoded[i*8+4:i*8+8], value[i].Denominator) +	} + +	ed.Type = TypeRational + +	return ed, nil +} + +func (ve *ValueEncoder) encodeSignedLongs(value []int32) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) + +	b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount)) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		err := binary.Write(b, ve.byteOrder, value[i]) +		log.PanicIf(err) +	} + +	ed.Type = TypeSignedLong +	ed.Encoded = b.Bytes() + +	return ed, nil +} + +func (ve *ValueEncoder) encodeSignedRationals(value []SignedRational) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	ed.UnitCount = uint32(len(value)) + +	b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount)) + +	for i := uint32(0); i < ed.UnitCount; i++ { +		
err := binary.Write(b, ve.byteOrder, value[i].Numerator) +		log.PanicIf(err) + +		err = binary.Write(b, ve.byteOrder, value[i].Denominator) +		log.PanicIf(err) +	} + +	ed.Type = TypeSignedRational +	ed.Encoded = b.Bytes() + +	return ed, nil +} + +// Encode returns bytes for the given value, infering type from the actual +// value. This does not support `TypeAsciiNoNull` (all strings are encoded as +// `TypeAscii`). +func (ve *ValueEncoder) Encode(value interface{}) (ed EncodedData, err error) { +	defer func() { +		if state := recover(); state != nil { +			err = log.Wrap(state.(error)) +		} +	}() + +	switch t := value.(type) { +	case []byte: +		ed, err = ve.encodeBytes(t) +		log.PanicIf(err) +	case string: +		ed, err = ve.encodeAscii(t) +		log.PanicIf(err) +	case []uint16: +		ed, err = ve.encodeShorts(t) +		log.PanicIf(err) +	case []uint32: +		ed, err = ve.encodeLongs(t) +		log.PanicIf(err) +	case []float32: +		ed, err = ve.encodeFloats(t) +		log.PanicIf(err) +	case []float64: +		ed, err = ve.encodeDoubles(t) +		log.PanicIf(err) +	case []Rational: +		ed, err = ve.encodeRationals(t) +		log.PanicIf(err) +	case []int32: +		ed, err = ve.encodeSignedLongs(t) +		log.PanicIf(err) +	case []SignedRational: +		ed, err = ve.encodeSignedRationals(t) +		log.PanicIf(err) +	case time.Time: +		// For convenience, if the user doesn't want to deal with translation +		// semantics with timestamps. + +		s := ExifFullTimestampString(t) + +		ed, err = ve.encodeAscii(s) +		log.PanicIf(err) +	default: +		log.Panicf("value not encodable: [%s] [%v]", reflect.TypeOf(value), value) +	} + +	return ed, nil +}  | 
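As a rough illustration of the encode/format round trip implemented in `value_encoder.go` and `type.go` above, the sketch below encodes a rational value and renders it back to a display string. The `main` wrapper and the sample exposure value are assumptions for demonstration only.

```go
package main

import (
	"encoding/binary"
	"fmt"

	exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
	ve := exifcommon.NewValueEncoder(binary.BigEndian)

	// A RATIONAL encodes to eight bytes per unit: numerator then denominator,
	// in the encoder's byte order (e.g. an exposure time of 1/250s).
	exposure := []exifcommon.Rational{{Numerator: 1, Denominator: 250}}

	ed, err := ve.Encode(exposure)
	if err != nil {
		panic(err)
	}

	fmt.Println(ed.Type, ed.UnitCount, len(ed.Encoded)) // RATIONAL 1 8

	// FormatFromType stringifies a decoded value; justFirst=true keeps only
	// the first element of a multi-value list.
	phrase, err := exifcommon.FormatFromType(exposure, true)
	if err != nil {
		panic(err)
	}

	fmt.Println(phrase) // 1/250
}
```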
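And a short sketch of the timestamp helpers from `utility.go`: again, the wrapper code is hypothetical, and the example timestamp mirrors the one used in the package's own comments.

```go
package main

import (
	"fmt"
	"time"

	exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
	when := time.Date(2018, 11, 30, 13, 1, 49, 0, time.UTC)

	// Renders the EXIF "YYYY:MM:DD HH:MM:SS" form: "2018:11:30 13:01:49".
	phrase := exifcommon.ExifFullTimestampString(when)
	fmt.Println(phrase)

	// ParseExifFullTimestamp reverses it into a UTC time.Time; '-' separators
	// are normalized to ':' before parsing.
	parsed, err := exifcommon.ParseExifFullTimestamp(phrase)
	if err != nil {
		panic(err)
	}

	fmt.Println(parsed.Equal(when)) // true
}
```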
