protocol: packets operate on io.ReadWriter; now use a buffer pool to encode/decode packets

This commit is contained in:
CPunch 2023-03-10 19:56:05 -06:00
parent 5cc3f275c1
commit 985890d351
3 changed files with 63 additions and 81 deletions

View File

@ -16,61 +16,16 @@ import (
*/ */
type Packet struct { type Packet struct {
ByteOrder binary.ByteOrder readWriter io.ReadWriter
Buf []byte
} }
func NewPacket(buf []byte) *Packet { func NewPacket(readWriter io.ReadWriter) *Packet {
pkt := &Packet{ pkt := &Packet{
ByteOrder: binary.LittleEndian, readWriter: readWriter,
Buf: buf,
} }
return pkt return pkt
} }
func (pkt *Packet) writeRaw(data []byte) {
pkt.Buf = append(pkt.Buf, data...)
}
func (pkt *Packet) Write(data []byte) (int, error) {
pkt.writeRaw(data)
if len(pkt.Buf) > CN_PACKET_BUFFER_SIZE {
return 0, fmt.Errorf("Failed to write to packet, invalid size!")
}
return len(data), nil
}
func (pkt *Packet) writeByte(data byte) {
pkt.Write([]byte{data})
}
func (pkt *Packet) readRaw(data []byte) (int, error) {
sz := copy(data, pkt.Buf)
pkt.Buf = pkt.Buf[sz:]
if sz != len(data) {
return sz, io.EOF
}
return sz, nil
}
func (pkt *Packet) Read(data []byte) (int, error) {
if len(data) > len(pkt.Buf) {
return 0, fmt.Errorf("Failed to read from packet, invalid size!")
}
return pkt.readRaw(data)
}
func (pkt *Packet) readByte() byte {
data := pkt.Buf[0]
pkt.Buf = pkt.Buf[1:]
return data
}
func (pkt *Packet) encodeStructField(field reflect.StructField, value reflect.Value) { func (pkt *Packet) encodeStructField(field reflect.StructField, value reflect.Value) {
log.Printf("Encoding '%s'", field.Name) log.Printf("Encoding '%s'", field.Name)
@ -89,13 +44,14 @@ func (pkt *Packet) encodeStructField(field reflect.StructField, value reflect.Va
buf16 = buf16[:sz] buf16 = buf16[:sz]
} else { } else {
// grow // grow
// TODO: probably a better way to do this?
for len(buf16) < sz { for len(buf16) < sz {
buf16 = append(buf16, 0) buf16 = append(buf16, 0)
} }
} }
// write // write
binary.Write(pkt, pkt.ByteOrder, buf16) binary.Write(pkt.readWriter, binary.LittleEndian, buf16)
default: default:
pkt.Encode(value.Addr().Interface()) pkt.Encode(value.Addr().Interface())
} }
@ -104,7 +60,7 @@ func (pkt *Packet) encodeStructField(field reflect.StructField, value reflect.Va
pad, err := strconv.Atoi(field.Tag.Get("pad")) pad, err := strconv.Atoi(field.Tag.Get("pad"))
if err == nil { if err == nil {
for i := 0; i < pad; i++ { for i := 0; i < pad; i++ {
pkt.writeByte(0) pkt.readWriter.Write([]byte{0})
} }
} }
} }
@ -121,7 +77,7 @@ func (pkt *Packet) Encode(data interface{}) {
} }
default: default:
// we pass everything else to go's binary package // we pass everything else to go's binary package
binary.Write(pkt, pkt.ByteOrder, data) binary.Write(pkt.readWriter, binary.LittleEndian, data)
} }
} }
@ -136,7 +92,7 @@ func (pkt *Packet) decodeStructField(field reflect.StructField, value reflect.Va
} }
buf16 := make([]uint16, sz) buf16 := make([]uint16, sz)
binary.Read(pkt, pkt.ByteOrder, buf16) binary.Read(pkt.readWriter, binary.LittleEndian, buf16)
// find null terminator // find null terminator
var realSize int var realSize int
@ -151,11 +107,11 @@ func (pkt *Packet) decodeStructField(field reflect.StructField, value reflect.Va
pkt.Decode(value.Addr().Interface()) pkt.Decode(value.Addr().Interface())
} }
// read padding bytes // consume padding bytes
pad, err := strconv.Atoi(field.Tag.Get("pad")) pad, err := strconv.Atoi(field.Tag.Get("pad"))
if err == nil { if err == nil {
for i := 0; i < pad; i++ { for i := 0; i < pad; i++ {
pkt.readByte() pkt.readWriter.Read([]byte{0})
} }
} }
} }
@ -171,6 +127,6 @@ func (pkt *Packet) Decode(data interface{}) {
pkt.decodeStructField(rv.Type().Field(i), rv.Field(i)) pkt.decodeStructField(rv.Type().Field(i), rv.Field(i))
} }
default: default:
binary.Read(pkt, pkt.ByteOrder, data) binary.Read(pkt.readWriter, binary.LittleEndian, data)
} }
} }

19
protocol/pool/pool.go Normal file
View File

@ -0,0 +1,19 @@
package pool
import (
"bytes"
"sync"
)
// maxPooledCap is the largest buffer capacity (in bytes) that Put will
// return to the pool. Oversized buffers are dropped so that one unusually
// large packet doesn't pin a big allocation in the pool forever.
const maxPooledCap = 1 << 16

// allocator backs Get/Put with reusable *bytes.Buffer values.
var allocator = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

// Get returns a ready-to-use, empty *bytes.Buffer from the pool.
// The caller should hand it back via Put when finished.
func Get() *bytes.Buffer {
	return allocator.Get().(*bytes.Buffer)
}

// Put resets buf and returns it to the pool for reuse.
// Buffers that have grown past maxPooledCap are discarded instead,
// leaving them to the garbage collector.
func Put(buf *bytes.Buffer) {
	if buf.Cap() > maxPooledCap {
		return
	}
	buf.Reset()
	allocator.Put(buf)
}

View File

@ -3,11 +3,13 @@ package server
import ( import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"io"
"log" "log"
"net" "net"
"github.com/CPunch/gopenfusion/db" "github.com/CPunch/gopenfusion/db"
"github.com/CPunch/gopenfusion/protocol" "github.com/CPunch/gopenfusion/protocol"
"github.com/CPunch/gopenfusion/protocol/pool"
) )
const ( const (
@ -48,32 +50,32 @@ func NewPeer(handler PeerHandler, conn net.Conn) *Peer {
} }
func (client *Peer) Send(data interface{}, typeID uint32) { func (client *Peer) Send(data interface{}, typeID uint32) {
buf := pool.Get()
defer func() { // always return the buffer to the pool
pool.Put(buf)
}()
// encode // encode
pkt := protocol.NewPacket(make([]byte, 0)) pkt := protocol.NewPacket(buf)
// write the typeID and packet body
pkt.Encode(uint32(typeID))
pkt.Encode(data) pkt.Encode(data)
log.Printf("Sending %#v, sizeof: %d", data, len(pkt.Buf))
// write packet size // write the packet size
tmp := make([]byte, 4) binary.Write(client.conn, binary.LittleEndian, uint32(buf.Len()))
binary.LittleEndian.PutUint32(tmp, uint32(len(pkt.Buf)+4))
if _, err := client.conn.Write(tmp); err != nil {
panic(fmt.Errorf("[FATAL] failed to write packet size! %v", err))
}
// prepend the typeID to the packet body
binary.LittleEndian.PutUint32(tmp, uint32(typeID))
tmp = append(tmp, pkt.Buf...)
// encrypt typeID & body // encrypt typeID & body
switch client.whichKey { switch client.whichKey {
case USE_E: case USE_E:
protocol.EncryptData(tmp, client.E_key) protocol.EncryptData(buf.Bytes(), client.E_key)
case USE_FE: case USE_FE:
protocol.EncryptData(tmp, client.FE_key) protocol.EncryptData(buf.Bytes(), client.FE_key)
} }
// write packet body // write packet type && packet body
if _, err := client.conn.Write(tmp); err != nil { log.Printf("Sending %#v, sizeof: %d", data, buf.Len())
if _, err := client.conn.Write(buf.Bytes()); err != nil {
panic(fmt.Errorf("[FATAL] failed to write packet body! %v", err)) panic(fmt.Errorf("[FATAL] failed to write packet body! %v", err))
} }
} }
@ -96,13 +98,12 @@ func (client *Peer) ClientHandler() {
client.Kill() client.Kill()
}() }()
tmp := make([]byte, 4, protocol.CN_PACKET_BUFFER_SIZE)
for { for {
// read packet size // read packet size
if _, err := client.conn.Read(tmp); err != nil { var sz uint32
if err := binary.Read(client.conn, binary.LittleEndian, &sz); err != nil {
panic(fmt.Errorf("[FATAL] failed to read packet size! %v", err)) panic(fmt.Errorf("[FATAL] failed to read packet size! %v", err))
} }
sz := int(binary.LittleEndian.Uint32(tmp))
// client should never send a packet size outside of this range // client should never send a packet size outside of this range
if sz > protocol.CN_PACKET_BUFFER_SIZE || sz < 4 { if sz > protocol.CN_PACKET_BUFFER_SIZE || sz < 4 {
@ -110,20 +111,26 @@ func (client *Peer) ClientHandler() {
} }
// read packet body // read packet body
if _, err := client.conn.Read(tmp[:sz]); err != nil { buf := pool.Get()
if _, err := buf.ReadFrom(io.LimitReader(client.conn, int64(sz))); err != nil {
panic(fmt.Errorf("[FATAL] failed to read packet body! %v", err)) panic(fmt.Errorf("[FATAL] failed to read packet body! %v", err))
} }
// decrypt && grab typeID fmt.Printf("%#v", buf)
protocol.DecryptData(tmp[:sz], client.E_key)
typeID := uint32(binary.LittleEndian.Uint32(tmp[:4])) // decrypt
protocol.DecryptData(buf.Bytes(), client.E_key)
// create packet && read typeID
var typeID uint32
pkt := protocol.NewPacket(buf)
pkt.Decode(&typeID)
// dispatch packet // dispatch packet
log.Printf("Got packet ID: %x, with a sizeof: %d\n", typeID, sz) log.Printf("Got packet ID: %x, with a sizeof: %d\n", typeID, sz)
pkt := protocol.NewPacket(tmp[4:sz])
client.handler.HandlePacket(client, typeID, pkt) client.handler.HandlePacket(client, typeID, pkt)
// reset tmp // restore buffer to pool
tmp = tmp[:4] pool.Put(buf)
} }
} }