Run frontend and backend in separate processes

In the future, this will allow the backend server to be restarted without closing open client connections.
This commit is contained in:
mkwcat 2024-04-22 10:16:27 -04:00
parent 5bdd9ddf0c
commit ddbf445a0d
No known key found for this signature in database
GPG Key ID: 7A505679CE9E7AA9
8 changed files with 461 additions and 130 deletions

49
common/connection.go Normal file
View File

@ -0,0 +1,49 @@
package common

import (
	"net/rpc"

	"wwfc/logging"
)

// rpcFrontend is the RPC client used by backend servers to reach the
// frontend process. It is nil until ConnectFrontend succeeds.
var rpcFrontend *rpc.Client

// RPCFrontendPacket is the argument type for RPC calls from a backend
// server to the frontend (RPCFrontendPacket.SendPacket / CloseConnection).
type RPCFrontendPacket struct {
	Server string // backend server name, e.g. "gpcm"
	Index  uint64 // frontend-assigned connection index
	Data   []byte // payload to write to the client connection (unused for close)
}
// ConnectFrontend connects to the frontend RPC server.
// On failure the error is logged and rpcFrontend is left nil, so callers
// must check rpcFrontend before using it.
func ConnectFrontend() {
	var err error
	if rpcFrontend, err = rpc.Dial("tcp", "localhost:29998"); err != nil {
		logging.Error("BACKEND", "Failed to connect to frontend:", err)
	}
}
// SendPacket is used by backend servers to send a packet to a connection
// owned by the frontend. server names the backend server (e.g. "gpcm") and
// index identifies the connection on the frontend side.
// Returns a non-nil error if the frontend is unreachable or the call fails.
func SendPacket(server string, index uint64, data []byte) error {
	if rpcFrontend == nil {
		ConnectFrontend()
	}

	if rpcFrontend == nil {
		// ConnectFrontend failed (it already logged the dial error); report
		// the dead link instead of dereferencing a nil client below.
		return rpc.ErrShutdown
	}

	err := rpcFrontend.Call("RPCFrontendPacket.SendPacket", RPCFrontendPacket{Server: server, Index: index, Data: data}, nil)
	if err != nil {
		logging.Error("COMMON", "Failed to send packet to frontend:", err)
	}

	return err
}
// CloseConnection is used by backend servers to close a connection owned by
// the frontend. server names the backend server (e.g. "gpcm") and index
// identifies the connection on the frontend side.
// Returns a non-nil error if the frontend is unreachable or the call fails.
func CloseConnection(server string, index uint64) error {
	if rpcFrontend == nil {
		ConnectFrontend()
	}

	if rpcFrontend == nil {
		// ConnectFrontend failed (it already logged the dial error); report
		// the dead link instead of dereferencing a nil client below.
		return rpc.ErrShutdown
	}

	err := rpcFrontend.Call("RPCFrontendPacket.CloseConnection", RPCFrontendPacket{Server: server, Index: index}, nil)
	if err != nil {
		logging.Error("COMMON", "Failed to close connection:", err)
	}

	return err
}

View File

@ -409,9 +409,9 @@ func (g *GameSpySession) replyError(err GPError) {
if !g.LoginInfoSet {
msg := err.GetMessage()
// logging.Info(g.ModuleName, "Sending error message:", msg)
g.Conn.Write([]byte(msg))
common.SendPacket("gpcm", g.ConnIndex, []byte(msg))
if err.Fatal {
g.Conn.Close()
common.CloseConnection("gpcm", g.ConnIndex)
}
return
}
@ -423,8 +423,8 @@ func (g *GameSpySession) replyError(err GPError) {
msg := err.GetMessageTranslate(g.GameName, g.Region, g.Language, g.ConsoleFriendCode, deviceId)
// logging.Info(g.ModuleName, "Sending error message:", msg)
g.Conn.Write([]byte(msg))
common.SendPacket("gpcm", g.ConnIndex, []byte(msg))
if err.Fatal {
g.Conn.Close()
common.CloseConnection("gpcm", g.ConnIndex)
}
}

View File

@ -241,7 +241,7 @@ func sendMessageToSession(msgType string, from uint32, session *GameSpySession,
"msg": msg,
},
})
session.Conn.Write([]byte(message))
common.SendPacket("gpcm", session.ConnIndex, []byte(message))
}
func sendMessageToSessionBuffer(msgType string, from uint32, session *GameSpySession, msg string) {

View File

@ -1,5 +1,7 @@
package gpcm
import "wwfc/common"
func kickPlayer(profileID uint32, reason string) {
if session, exists := sessions[profileID]; exists {
errorMessage := WWFCMsgKickedGeneric
@ -25,7 +27,7 @@ func kickPlayer(profileID uint32, reason string) {
case "network_error":
// No error message
session.Conn.Close()
common.CloseConnection("gpcm", session.ConnIndex)
return
}
@ -35,7 +37,7 @@ func kickPlayer(profileID uint32, reason string) {
Fatal: true,
WWFCMessage: errorMessage,
})
session.Conn.Close()
common.CloseConnection("gpcm", session.ConnIndex)
}
}

View File

@ -269,7 +269,7 @@ func (g *GameSpySession) login(command common.GameSpyCommand) {
otherSession, exists := sessions[g.User.ProfileId]
if exists {
otherSession.replyError(ErrForcedDisconnect)
otherSession.Conn.Close()
common.CloseConnection("gpcm", otherSession.ConnIndex)
for i := 0; ; i++ {
mutex.Unlock()
@ -302,7 +302,7 @@ func (g *GameSpySession) login(command common.GameSpyCommand) {
g.ModuleName += "/" + common.CalcFriendCodeString(g.User.ProfileId, g.User.GsbrCode[:4])
// Notify QR2 of the login
qr2.Login(g.User.ProfileId, gamecd, ingamesn, cfc, g.User.GsbrCode[:4], g.Conn.RemoteAddr().String(), g.NeedsExploit, g.DeviceAuthenticated, g.User.Restricted, KickPlayer)
qr2.Login(g.User.ProfileId, gamecd, ingamesn, cfc, g.User.GsbrCode[:4], g.RemoteAddr, g.NeedsExploit, g.DeviceAuthenticated, g.User.Restricted, KickPlayer)
replyUserId := g.User.UserId
if g.UnitCode == UnitCodeDS {
@ -336,19 +336,7 @@ func (g *GameSpySession) login(command common.GameSpyCommand) {
OtherValues: otherValues,
})
g.Conn.Write([]byte(payload))
// Now start sending keep alive packets every 5 minutes
go func() {
for {
time.Sleep(5 * time.Minute)
if !g.LoggedIn {
return
}
g.Conn.Write([]byte(`\ka\\final\`))
}
}()
common.SendPacket("gpcm", g.ConnIndex, []byte(payload))
}
func (g *GameSpySession) exLogin(command common.GameSpyCommand) {
@ -439,7 +427,7 @@ func (g *GameSpySession) verifyExLoginInfo(command common.GameSpyCommand, authTo
func (g *GameSpySession) performLoginWithDatabase(userId uint64, gsbrCode string, profileId uint32, deviceId uint32) bool {
// Get IP address without port
ipAddress := g.Conn.RemoteAddr().String()
ipAddress := g.RemoteAddr
if strings.Contains(ipAddress, ":") {
ipAddress = ipAddress[:strings.Index(ipAddress, ":")]
}

View File

@ -1,12 +1,8 @@
package gpcm
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net"
"wwfc/common"
"wwfc/database"
"wwfc/logging"
@ -18,7 +14,8 @@ import (
)
type GameSpySession struct {
Conn net.Conn
ConnIndex uint64
RemoteAddr string
WriteBuffer string
User database.User
ModuleName string
@ -59,8 +56,9 @@ var (
ctx = context.Background()
pool *pgxpool.Pool
// I would use a sync.Map instead of the map mutex combo, but this performs better.
sessions = map[uint32]*GameSpySession{}
mutex = deadlock.Mutex{}
sessions = map[uint32]*GameSpySession{}
sessionsByConnIndex = map[uint64]*GameSpySession{}
mutex = deadlock.Mutex{}
allowDefaultDolphinKeys bool
)
@ -84,58 +82,44 @@ func StartServer() {
database.UpdateTables(pool, ctx)
allowDefaultDolphinKeys = config.AllowDefaultDolphinKeys
address := *config.GameSpyAddress + ":29900"
l, err := net.Listen("tcp", address)
if err != nil {
panic(err)
}
// Close the listener when the application closes.
defer l.Close()
logging.Notice("GPCM", "Listening on", address)
for {
// Listen for an incoming connection.
conn, err := l.Accept()
if err != nil {
panic(err)
}
// Handle connections in a new goroutine.
go handleRequest(conn)
}
}
func (g *GameSpySession) closeSession() {
if r := recover(); r != nil {
logging.Error(g.ModuleName, "Panic:", r)
func CloseConnection(index uint64) {
mutex.Lock()
session := sessionsByConnIndex[index]
mutex.Unlock()
if session == nil {
logging.Error("GPCM", "Cannot find session for this connection index:", aurora.Cyan(index))
return
}
if g.LoggedIn {
qr2.Logout(g.User.ProfileId)
if g.QR2IP != 0 {
qr2.ProcessGPStatusUpdate(g.User.ProfileId, g.QR2IP, "0")
logging.Notice(session.ModuleName, "Connection closed")
if session.LoggedIn {
qr2.Logout(session.User.ProfileId)
if session.QR2IP != 0 {
qr2.ProcessGPStatusUpdate(session.User.ProfileId, session.QR2IP, "0")
}
g.sendLogoutStatus()
session.sendLogoutStatus()
}
mutex.Lock()
defer mutex.Unlock()
g.Conn.Close()
if g.LoggedIn {
g.LoggedIn = false
delete(sessions, g.User.ProfileId)
if session.LoggedIn {
session.LoggedIn = false
delete(sessions, session.User.ProfileId)
}
}
// Handles incoming requests.
func handleRequest(conn net.Conn) {
func NewConnection(index uint64, address string) {
session := &GameSpySession{
Conn: conn,
ConnIndex: index,
RemoteAddr: address,
User: database.User{},
ModuleName: "GPCM:" + conn.RemoteAddr().String(),
ModuleName: "GPCM:" + address,
LoggedIn: false,
Challenge: "",
StatusSet: false,
@ -145,16 +129,9 @@ func handleRequest(conn net.Conn) {
AuthFriendList: []uint32{},
}
defer session.closeSession()
// Set session ID and challenge
// Set challenge
session.Challenge = common.RandomString(10)
err := conn.(*net.TCPConn).SetKeepAlive(true)
if err != nil {
logging.Error(session.ModuleName, "Unable to set keepalive:", err.Error())
}
payload := common.CreateGameSpyMessage(common.GameSpyCommand{
Command: "lc",
CommandValue: "1",
@ -163,66 +140,70 @@ func handleRequest(conn net.Conn) {
"id": "1",
},
})
conn.Write([]byte(payload))
common.SendPacket("gpcm", index, []byte(payload))
logging.Notice(session.ModuleName, "Connection established from", conn.RemoteAddr())
logging.Notice(session.ModuleName, "Connection established from", address)
// Here we go into the listening loop
for {
// TODO: Handle split packets
buffer := make([]byte, 1024)
n, err := bufio.NewReader(conn).Read(buffer)
if err != nil {
if errors.Is(err, io.EOF) {
// Client closed connection, terminate.
logging.Info(session.ModuleName, "Client closed connection")
return
}
mutex.Lock()
sessionsByConnIndex[index] = session
mutex.Unlock()
}
logging.Error(session.ModuleName, "Connection lost")
return
func HandlePacket(index uint64, data []byte) {
mutex.Lock()
session := sessionsByConnIndex[index]
mutex.Unlock()
if session == nil {
logging.Error("GPCM", "Cannot find session for this connection index:", aurora.Cyan(index))
return
}
defer func() {
if r := recover(); r != nil {
logging.Error(session.ModuleName, "Panic:", r)
}
}()
commands, err := common.ParseGameSpyMessage(string(buffer[:n]))
if err != nil {
logging.Error(session.ModuleName, "Error parsing message:", err.Error())
logging.Error(session.ModuleName, "Raw data:", string(buffer[:n]))
session.replyError(ErrParse)
return
}
commands, err := common.ParseGameSpyMessage(string(data))
if err != nil {
logging.Error(session.ModuleName, "Error parsing message:", err.Error())
logging.Error(session.ModuleName, "Raw data:", string(data))
session.replyError(ErrParse)
return
}
// Commands must be handled in a certain order, not in the order supplied by the client
// Commands must be handled in a certain order, not in the order supplied by the client
commands = session.handleCommand("ka", commands, func(command common.GameSpyCommand) {
session.Conn.Write([]byte(`\ka\\final\`))
})
commands = session.handleCommand("login", commands, session.login)
commands = session.handleCommand("wwfc_exlogin", commands, session.exLogin)
commands = session.ignoreCommand("logout", commands)
commands = session.handleCommand("ka", commands, func(command common.GameSpyCommand) {
common.SendPacket("gpcm", session.ConnIndex, []byte(`\ka\\final\`))
})
commands = session.handleCommand("login", commands, session.login)
commands = session.handleCommand("wwfc_exlogin", commands, session.exLogin)
commands = session.ignoreCommand("logout", commands)
if len(commands) != 0 && !session.LoggedIn {
logging.Error(session.ModuleName, "Attempt to run command before login:", aurora.Cyan(commands[0]))
session.replyError(ErrNotLoggedIn)
return
}
if len(commands) != 0 && !session.LoggedIn {
logging.Error(session.ModuleName, "Attempt to run command before login:", aurora.Cyan(commands[0]))
session.replyError(ErrNotLoggedIn)
return
}
commands = session.handleCommand("wwfc_report", commands, session.handleWWFCReport)
commands = session.handleCommand("updatepro", commands, session.updateProfile)
commands = session.handleCommand("status", commands, session.setStatus)
commands = session.handleCommand("addbuddy", commands, session.addFriend)
commands = session.handleCommand("delbuddy", commands, session.removeFriend)
commands = session.handleCommand("authadd", commands, session.authAddFriend)
commands = session.handleCommand("bm", commands, session.bestieMessage)
commands = session.handleCommand("getprofile", commands, session.getProfile)
commands = session.handleCommand("wwfc_report", commands, session.handleWWFCReport)
commands = session.handleCommand("updatepro", commands, session.updateProfile)
commands = session.handleCommand("status", commands, session.setStatus)
commands = session.handleCommand("addbuddy", commands, session.addFriend)
commands = session.handleCommand("delbuddy", commands, session.removeFriend)
commands = session.handleCommand("authadd", commands, session.authAddFriend)
commands = session.handleCommand("bm", commands, session.bestieMessage)
commands = session.handleCommand("getprofile", commands, session.getProfile)
for _, command := range commands {
logging.Error(session.ModuleName, "Unknown command:", aurora.Cyan(command))
}
for _, command := range commands {
logging.Error(session.ModuleName, "Unknown command:", aurora.Cyan(command))
}
if session.WriteBuffer != "" {
conn.Write([]byte(session.WriteBuffer))
session.WriteBuffer = ""
}
if session.WriteBuffer != "" {
common.SendPacket("gpcm", session.ConnIndex, []byte(session.WriteBuffer))
session.WriteBuffer = ""
}
}

View File

@ -163,7 +163,7 @@ func (g *GameSpySession) bestieMessage(command common.GameSpyCommand) {
}
if cmd == common.MatchReservation {
if common.IPFormatNoPortToInt(g.Conn.RemoteAddr().String()) != int32(msgMatchData.Reservation.PublicIP) {
if common.IPFormatNoPortToInt(g.RemoteAddr) != int32(msgMatchData.Reservation.PublicIP) {
logging.Error(g.ModuleName, "RESERVATION: Public IP mismatch")
g.replyError(ErrMessage)
return
@ -172,7 +172,7 @@ func (g *GameSpySession) bestieMessage(command common.GameSpyCommand) {
g.QR2IP = uint64(msgMatchData.Reservation.PublicIP) | (uint64(msgMatchData.Reservation.PublicPort) << 32)
} else if cmd == common.MatchResvOK {
if common.IPFormatNoPortToInt(g.Conn.RemoteAddr().String()) != int32(msgMatchData.ResvOK.PublicIP) {
if common.IPFormatNoPortToInt(g.RemoteAddr) != int32(msgMatchData.ResvOK.PublicIP) {
logging.Error(g.ModuleName, "RESV_OK: Public IP mismatch")
g.replyError(ErrMessage)
return
@ -204,7 +204,7 @@ func (g *GameSpySession) bestieMessage(command common.GameSpyCommand) {
return
}
sameAddress := strings.Split(g.Conn.RemoteAddr().String(), ":")[0] == strings.Split(toSession.Conn.RemoteAddr().String(), ":")[0]
sameAddress := strings.Split(g.RemoteAddr, ":")[0] == strings.Split(toSession.RemoteAddr, ":")[0]
if cmd == common.MatchReservation {
if g.QR2IP == 0 {
@ -360,7 +360,7 @@ func (g *GameSpySession) bestieMessage(command common.GameSpyCommand) {
},
})
toSession.Conn.Write([]byte(message))
common.SendPacket("gpcm", toSession.ConnIndex, []byte(message))
// Append sender's profile ID to dest's RecvStatusFromList
toSession.RecvStatusFromList = append(toSession.RecvStatusFromList, g.User.ProfileId)

317
main.go
View File

@ -1,6 +1,12 @@
package main
import (
"errors"
"net"
"net/rpc"
"os"
"os/exec"
"strconv"
"sync"
"wwfc/api"
"wwfc/common"
@ -13,14 +19,65 @@ import (
"wwfc/qr2"
"wwfc/sake"
"wwfc/serverbrowser"
"github.com/logrusorgru/aurora/v3"
)
var config = common.GetConfig()
func main() {
config := common.GetConfig()
logging.SetLevel(*config.LogLevel)
if err := logging.SetOutput(config.LogOutput); err != nil {
logging.Error("MAIN", err)
args := os.Args[1:]
// Separate frontend and backend into two separate processes.
// This is to allow restarting the backend without closing all connections.
// Start the backend instead of the frontend if the first argument is "backend"
if len(args) > 0 && args[0] == "backend" {
backendMain()
} else {
frontendMain()
}
}
// RPCPacket is the argument type for RPC calls from the frontend to the
// backend (RPCPacket.NewConnection / HandlePacket / CloseConnection).
type RPCPacket struct {
	Server  string // backend server name, e.g. "gpcm"
	Index   uint64 // frontend-assigned connection index
	Address string // remote address of the client connection
	Data    []byte // raw packet payload (empty for new/close notifications)
}
// backendMain starts all the servers and creates an RPC server to communicate with the frontend
func backendMain() {
if err := logging.SetOutput(config.LogOutput); err != nil {
logging.Error("BACKEND", err)
}
rpc.Register(&RPCPacket{})
address := "localhost:29999"
l, err := net.Listen("tcp", address)
if err != nil {
logging.Error("BACKEND", "Failed to listen on", aurora.BrightCyan(address))
os.Exit(1)
}
logging.Notice("BACKEND", "Listening on", aurora.BrightCyan(address))
go func() {
for {
conn, err := l.Accept()
if err != nil {
logging.Error("BACKEND", "Failed to accept connection on", aurora.BrightCyan(address))
continue
}
go rpc.ServeConn(conn)
}
}()
// TODO: Wait until the servers are started before allowing in connections
wg := &sync.WaitGroup{}
actions := []func(){nas.StartServer, gpcm.StartServer, qr2.StartServer, gpsp.StartServer, serverbrowser.StartServer, sake.StartServer, natneg.StartServer, api.StartServer, gamestats.StartServer}
@ -34,3 +91,257 @@ func main() {
wg.Wait()
}
// NewConnection is called by the frontend to notify the backend of a new
// client connection. Unknown server names are silently ignored.
func (r *RPCPacket) NewConnection(args RPCPacket, _ *struct{}) error {
	if args.Server == "gpcm" {
		gpcm.NewConnection(args.Index, args.Address)
	}
	return nil
}
// HandlePacket is called by the frontend to forward one received packet to
// the backend server that owns the connection. Unknown server names are
// silently ignored.
func (r *RPCPacket) HandlePacket(args RPCPacket, _ *struct{}) error {
	if args.Server == "gpcm" {
		gpcm.HandlePacket(args.Index, args.Data)
	}
	return nil
}
// RPCPacket.CloseConnection is called by the frontend to notify the backend
// of a closed connection. Unknown server names are silently ignored.
func (r *RPCPacket) CloseConnection(args RPCPacket, _ *struct{}) error {
	switch args.Server {
	case "gpcm":
		gpcm.CloseConnection(args.Index)
	}
	return nil
}
// serverInfo describes one listener the frontend opens on behalf of a
// backend server.
type serverInfo struct {
	rpcName  string // server name used in RPC calls, e.g. "gpcm"
	protocol string // network protocol for net.Listen, e.g. "tcp"
	port     int    // port appended to the configured GameSpy address
}

// RPCFrontendPacket is the argument type for RPC calls from the backend to
// the frontend. It mirrors the type of the same name in the common package.
type RPCFrontendPacket struct {
	Server string // backend server name, e.g. "gpcm"
	Index  uint64 // frontend-assigned connection index
	Data   []byte // payload to write to the client connection (unused for close)
}
var (
	rpcClient    *rpc.Client    // client the frontend uses to call the backend
	rpcMutex     sync.Mutex     // guards rpcClient and the connections map
	rpcBusyCount sync.WaitGroup // counts in-flight RPC calls to the backend

	// connections maps server name -> connection index -> open client socket.
	connections = map[string]map[uint64]net.Conn{}
)
// frontendMain starts the backend process and communicates with it using RPC.
// The frontend owns the real client sockets and forwards traffic to the
// backend, which is what will allow the backend to be restarted without
// dropping open connections.
func frontendMain() {
	// Don't allow the frontend to output to a file (there's no reason to)
	logOutput := config.LogOutput
	if logOutput == "StdOutAndFile" {
		logOutput = "StdOut"
	}

	if err := logging.SetOutput(logOutput); err != nil {
		logging.Error("FRONTEND", err)
	}

	// Take the RPC mutex before the backend exists; startBackendProcess
	// releases it once rpcClient is connected, so forwarding goroutines
	// block until the backend is reachable.
	rpcMutex.Lock()

	startFrontendServer()
	go startBackendProcess()

	servers := []serverInfo{
		// {rpcName: "serverbrowser", protocol: "tcp", port: 28910},
		{rpcName: "gpcm", protocol: "tcp", port: 29900},
		// {rpcName: "gpsp", protocol: "tcp", port: 29901},
		// {rpcName: "gamestats", protocol: "tcp", port: 29920},
	}

	for _, server := range servers {
		connections[server.rpcName] = map[uint64]net.Conn{}
		go frontendListen(server)
	}

	// Prevent application from exiting
	select {}
}
// startFrontendServer starts the frontend RPC server, which the backend
// calls to write to or close client connections. Exits the process if the
// listen address is unavailable.
func startFrontendServer() {
	rpc.Register(&RPCFrontendPacket{})

	address := "localhost:29998"
	listener, err := net.Listen("tcp", address)
	if err != nil {
		logging.Error("FRONTEND", "Failed to listen on", aurora.BrightCyan(address))
		os.Exit(1)
	}

	logging.Notice("FRONTEND", "Listening on", aurora.BrightCyan(address))

	acceptLoop := func() {
		for {
			conn, acceptErr := listener.Accept()
			if acceptErr != nil {
				logging.Error("FRONTEND", "Failed to accept connection on", aurora.BrightCyan(address))
				continue
			}

			go rpc.ServeConn(conn)
		}
	}

	go acceptLoop()
}
// startBackendProcess starts the backend process and waits for the RPC server to start.
// Expects the RPC mutex to be locked.
func startBackendProcess() {
	// Re-exec this binary with the "backend" argument (see main), sharing
	// the frontend's stdout/stderr.
	cmd := exec.Command(os.Args[0], "backend")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	err := cmd.Start()
	if err != nil {
		logging.Error("FRONTEND", "Failed to start backend process:", err)
		os.Exit(1)
	}

	// Poll until the backend's RPC server accepts a connection, then release
	// the mutex taken by frontendMain so forwarding can begin.
	// NOTE(review): this loop busy-spins with no delay between dial attempts
	// and never gives up if the backend dies before listening — consider a
	// short time.Sleep per iteration and a retry limit. The child process is
	// also never Wait()ed on, so an exited backend is left as a zombie.
	for {
		client, err := rpc.Dial("tcp", "localhost:29999")
		if err == nil {
			rpcClient = client
			rpcMutex.Unlock()
			break
		}
	}
}
// frontendListen listens on the specified port and forwards each packet to
// the backend. Each accepted connection gets a unique index and is pumped by
// its own handleConnection goroutine.
func frontendListen(server serverInfo) {
	address := *config.GameSpyAddress + ":" + strconv.Itoa(server.port)
	l, err := net.Listen(server.protocol, address)
	if err != nil {
		logging.Error("FRONTEND", "Failed to listen on", aurora.BrightCyan(address))
		return
	}

	logging.Notice("FRONTEND", "Listening on", aurora.BrightCyan(address), "for", aurora.BrightCyan(server.rpcName))

	// Increment by 1 for each connection, never decrement. Unlikely to overflow but it doesn't matter if it does.
	count := uint64(0)

	for {
		conn, err := l.Accept()
		if err != nil {
			logging.Error("FRONTEND", "Failed to accept connection on", aurora.BrightCyan(address))
			continue
		}

		if server.protocol == "tcp" {
			// Keepalive lets the OS detect half-dead clients; failure to
			// enable it is non-fatal, so only warn.
			err := conn.(*net.TCPConn).SetKeepAlive(true)
			if err != nil {
				logging.Warn("FRONTEND", "Unable to set keepalive", err.Error())
			}
		}

		count++
		go handleConnection(server, conn, count)
	}
}
// handleConnection forwards packets between the frontend and backend.
// It registers the connection, notifies the backend, then pumps packets from
// the client socket to the backend until the socket closes or an RPC fails.
// rpcBusyCount brackets every RPC call so in-flight calls can be drained.
func handleConnection(server serverInfo, conn net.Conn, index uint64) {
	defer conn.Close()

	rpcMutex.Lock()
	rpcBusyCount.Add(1)
	connections[server.rpcName][index] = conn
	rpcMutex.Unlock()

	err := rpcClient.Call("RPCPacket.NewConnection", RPCPacket{Server: server.rpcName, Index: index, Address: conn.RemoteAddr().String(), Data: []byte{}}, nil)
	rpcBusyCount.Done()

	if err != nil {
		// Backend never learned about this connection; unregister and drop it.
		logging.Error("FRONTEND", "Failed to forward new connection to backend:", err)

		rpcMutex.Lock()
		delete(connections[server.rpcName], index)
		rpcMutex.Unlock()
		return
	}

	for {
		// NOTE(review): reads are capped at 1024 bytes, so a client message
		// larger than that reaches the backend split across multiple packets.
		buffer := make([]byte, 1024)
		n, err := conn.Read(buffer)
		if err != nil {
			// Client closed the connection (or read error); stop forwarding.
			break
		}

		rpcMutex.Lock()
		rpcBusyCount.Add(1)
		rpcMutex.Unlock()

		// Forward the packet to the backend
		err = rpcClient.Call("RPCPacket.HandlePacket", RPCPacket{Server: server.rpcName, Index: index, Address: conn.RemoteAddr().String(), Data: buffer[:n]}, nil)
		rpcBusyCount.Done()

		if err != nil {
			logging.Error("FRONTEND", "Failed to forward packet to backend:", err)
			break
		}
	}

	// Unregister before telling the backend, so the backend cannot race a
	// SendPacket against a connection that is going away.
	rpcMutex.Lock()
	rpcBusyCount.Add(1)
	delete(connections[server.rpcName], index)
	rpcMutex.Unlock()

	err = rpcClient.Call("RPCPacket.CloseConnection", RPCPacket{Server: server.rpcName, Index: index, Address: conn.RemoteAddr().String(), Data: []byte{}}, nil)
	rpcBusyCount.Done()

	if err != nil {
		logging.Error("FRONTEND", "Failed to forward close connection to backend:", err)
	}
}
// ErrBadIndex is returned when an RPC call references a connection index
// that is not (or no longer) tracked by the frontend.
var ErrBadIndex = errors.New("incorrect connection index")

// SendPacket is called by the backend to send a packet to a client
// connection owned by the frontend.
func (r *RPCFrontendPacket) SendPacket(args RPCFrontendPacket, _ *struct{}) error {
	rpcMutex.Lock()
	defer rpcMutex.Unlock()

	if conn, ok := connections[args.Server][args.Index]; ok {
		_, err := conn.Write(args.Data)
		return err
	}

	return ErrBadIndex
}
// CloseConnection is called by the backend to close a client connection and
// remove it from the frontend's tracking map.
func (r *RPCFrontendPacket) CloseConnection(args RPCFrontendPacket, _ *struct{}) error {
	rpcMutex.Lock()
	defer rpcMutex.Unlock()

	conn, found := connections[args.Server][args.Index]
	if !found {
		return ErrBadIndex
	}

	delete(connections[args.Server], args.Index)
	return conn.Close()
}