package main

import (
	"archive/zip"
	"bytes"
	"compress/zlib"
	"crypto/aes"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	tb "gopkg.in/tucnak/telebot.v2"
)

// createJob inserts a new job (one obj row plus one obj_job row), caches it in cacheObjJob
// and returns the new object id. Payloads larger than 10000 bytes are zlib-compressed first.
func createJob(jobTypeID64 int64, priority int32, userID64 int64, trigger int64, schedule time.Time, payload []byte) (int64, error) {
	var (
		zb       bytes.Buffer
		zpayload []byte
		zipped   int
	)
	if len(payload) > 10000 {
		zw := zlib.NewWriter(&zb)
		zw.Write(payload)
		zw.Close()
		zpayload = zb.Bytes()
		zipped = 1
	} else {
		zpayload = payload
		zipped = 0
	}

	if len(zpayload) > 20000 {
		return 0, errors.New("payload too long")
	}

	stmt, err := db.Prepare(`INSERT INTO obj (obj_type_id, obj_sub_type_id)
		VALUES (?, ?);`)
	logOnError(err, "createJob : prepare insert obj")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	res, err := stmt.Exec(cacheObjType[`job`], jobTypeID64)
	s := fmt.Sprintf("createJob, insert obj(%d, %d)", cacheObjType[`job`], jobTypeID64)
	logOnError(err, s)
	if err != nil {
		return 0, err
	}

	objId, err := res.LastInsertId()
	logOnError(err, "createJob : get last insert Id")
	if err != nil {
		return 0, err
	}

	stmt, err = db.Prepare(`INSERT INTO obj_job (obj_id, priority, user_id, trigger_id, seq_nr, schedule, is_done, in_work, inserted, timeout, pulled, started, ended, zipped, payload)
		VALUES (?, ?, ?, ?, NULL, ?, 0, 0, ?, ?, NULL, NULL, NULL, ?, ?);`)
	logOnError(err, "createJob : prepare insert obj_job")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	_, err = stmt.Exec(objId, priority, userID64, trigger, schedule.UTC(), time.Now().UTC(), time.Unix(maxUnixTimestamp, 0).UTC(), zipped, zpayload)
	logOnError(err, "createJob : insert obj_job")
	if err != nil {
		return 0, err
	}

	j := new(Job)
	j.ID64 = objId
	j.JobTypeID64 = jobTypeID64
	j.Trigger = trigger
	j.Timeout = time.Unix(maxUnixTimestamp, 0).UTC()
	j.UserID64 = userID64
	j.Payload = payload
	muxObjJob.Lock()
	cacheObjJob[objId] = *j
	muxObjJob.Unlock()

	return objId, nil
}
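
// The sketch below is illustrative only and not part of the original flow: it shows the
// usual way a job is enqueued, by marshalling a payload struct to JSON and handing it to
// createJob together with a job subtype, a priority and a schedule.
func exampleScheduleSetDone(userID64 int64, childJobID64 int64) (int64, error) {
	r := JobPayloadSetDone{
		JobID64: childJobID64,
		Text:    "done",
	}
	b, err := json.Marshal(r)
	if err != nil {
		return 0, err
	}
	return createJob(cacheObjSubType[`job_set_done`], objJobPriority, userID64, 0, time.Now().UTC(), b)
}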

// createJobCallback creates a job that stays parked until a message of msgTypeID64 arrives
// for the user, and gives it an explicit timeout.
func createJobCallback(jobTypeID64 int64, userID64 int64, msgTypeID64 int64, payload []byte, timeout time.Duration) error {
	//t, err := time.Parse(time.RFC3339, "9999-12-31T00:00:00+00:00")
	jobID64, err := createJob(jobTypeID64, objJobPriority, userID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), payload)
	if err != nil {
		return err
	}
	setJobCallback(jobID64, userID64, msgTypeID64)
	err = setJobTimeout(jobID64, timeout)
	logOnError(err, "createJobCallback : setJobTimeout")
	return nil
}

// setJobCallback registers jobID64 in the callbacks index (user id -> message type -> job ids)
// so the job can be triggered when a matching message shows up.
func setJobCallback(jobID64 int64, userID64 int64, msgTypeID64 int64) {
	muxCallbacks.Lock()
	if _, ok := callbacks[userID64]; !ok {
		callbacks[userID64] = make(map[int64][]int64)
	}
	s := callbacks[userID64][msgTypeID64]
	s = append(s, jobID64)
	callbacks[userID64][msgTypeID64] = s
	muxCallbacks.Unlock()
}
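
// exampleCallbackLookup is a minimal sketch, an assumption rather than the bot's actual
// dispatcher, of how the index built by setJobCallback can be consumed: when a message of
// msgTypeID64 arrives for userID64, the pending job ids are fetched and the entry cleared.
func exampleCallbackLookup(userID64 int64, msgTypeID64 int64) []int64 {
	muxCallbacks.Lock()
	defer muxCallbacks.Unlock()
	if m, ok := callbacks[userID64]; ok {
		jobIDs := m[msgTypeID64]
		delete(m, msgTypeID64)
		return jobIDs
	}
	return nil
}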

func setJobTimeout(jobID64 int64, d time.Duration) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.timeout = ? WHERE j.obj_id = ?;`)
	logOnError(err, "setJobTimeout : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	t := time.Now().UTC().Add(d)
	_, err = stmt.Exec(t, jobID64)
	logOnError(err, fmt.Sprintf("setJobTimeout : update obj_job(%d)", jobID64))
	if err != nil {
		return err
	}

	muxObjJob.Lock()
	j := cacheObjJob[jobID64]
	j.Timeout = t
	cacheObjJob[jobID64] = j
	muxObjJob.Unlock()

	return nil
}

func setJobPayloadJSON(jobID64 int64, payload interface{}) error {
	b, err := json.Marshal(payload)
	logOnError(err, "setJobPayloadJSON")
	if err != nil {
		return err
	}
	return setJobPayload(jobID64, b)
}
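
// exampleStashAndReadPayload is a minimal sketch (not original code) of the payload round
// trip used by the multi-pass jobs further down: a struct is stored on a job with
// setJobPayloadJSON and later read back from the cache with getJobPayload.
func exampleStashAndReadPayload(jobID64 int64) (JobPayloadSetDone, error) {
	var out JobPayloadSetDone
	in := JobPayloadSetDone{JobID64: jobID64, Text: "stashed"}
	if err := setJobPayloadJSON(jobID64, in); err != nil {
		return out, err
	}
	err := json.Unmarshal(getJobPayload(jobID64), &out)
	return out, err
}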

// setJobPayload replaces the stored payload of a job, compressing it when it exceeds
// 10000 bytes, and refreshes the in-memory cache entry.
func setJobPayload(jobID64 int64, payload []byte) error {
	var (
		zb       bytes.Buffer
		zpayload []byte
		zipped   int
	)

	if len(payload) > 10000 {
		zw := zlib.NewWriter(&zb)
		zw.Write(payload)
		zw.Close()
		zpayload = zb.Bytes()
		zipped = 1
	} else {
		zpayload = payload
		zipped = 0
	}

	if len(zpayload) > 10000 {
		return errors.New("payload too long")
	}

	stmt, err := db.Prepare(`UPDATE obj_job j SET j.payload = ?, j.zipped = ? WHERE j.obj_id = ?;`)
	logOnError(err, "setJobPayload : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(zpayload, zipped, jobID64)
	logOnError(err, fmt.Sprintf("setJobPayload : update obj_job(%d)", jobID64))
	if err != nil {
		return err
	}
	muxObjJob.Lock()
	j := cacheObjJob[jobID64]
	j.Payload = payload
	cacheObjJob[jobID64] = j
	muxObjJob.Unlock()

	//log.Printf("setJobPayload[%d] : %s\n", jobID64, string(payload))
	return nil
}

// getJobPayload returns the cached (uncompressed) payload of a job, or nil when the job is
// not in the cache.
func getJobPayload(jobID64 int64) []byte {
	muxObjJob.Lock()
	defer muxObjJob.Unlock()
	if j, ok := cacheObjJob[jobID64]; ok {
		return j.Payload
	}
	return nil
}

func setJobDone(jobID64 int64) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.is_done = 1, j.in_work = 0, j.ended = ? WHERE j.obj_id = ?;`)
	logOnError(err, "setJobDone : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(time.Now().UTC(), jobID64)
	s := fmt.Sprintf("setJobDone, update obj_job(%d)", jobID64)
	logOnError(err, s)
	if err != nil {
		return err
	}
	return nil
}

func setJobStart(jobId int64) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.started = coalesce(j.started, ?) WHERE j.obj_id = ?;`)
	logOnError(err, "setJobStart : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(time.Now().UTC(), jobId)
	s := fmt.Sprintf("setJobStart, update obj_job(%d)", jobId)
	logOnError(err, s)
	if err != nil {
		return err
	}
	return nil
}

// rescheduleJob puts a job back in the queue with a new schedule and trigger.
func rescheduleJob(jobID64 int64, trigger int64, schedule time.Time) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.in_work = 0, j.schedule = ?, j.trigger_id = ? WHERE j.obj_id = ?;`)
	logOnError(err, "rescheduleJob : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(schedule.UTC(), trigger, jobID64)
	s := fmt.Sprintf("rescheduleJob, update obj_job(%d)", jobID64)
	logOnError(err, s)
	if err != nil {
		return err
	}
	return nil
}
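
// exampleParkJobUntilMessage is a minimal sketch (an assumption, not original code) of the
// "park and wait" pattern used by jobGDepositForward below: the job is pushed out to the
// far future and registered as a callback, so it only runs again once a message of the
// given type arrives for its user.
func exampleParkJobUntilMessage(j Job, msgTypeID64 int64) {
	err := rescheduleJob(j.ID64, 0, time.Unix(maxUnixTimestamp, 0).UTC())
	logOnError(err, "exampleParkJobUntilMessage : rescheduleJob")
	setJobCallback(j.ID64, j.UserID64, msgTypeID64)
}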

// loadCurrentJobs claims a batch of due jobs (marking them in_work under a random batch id)
// and returns them with their payloads uncompressed.
func loadCurrentJobs() ([]Job, error) {
	var (
		objId       int64
		jobTypeID64 int64
		userID64    int64
		trigger     int64
		timeout     time.Time
		zipped      int
		zpayload    []byte
		jobs        []Job
	)
	t := time.Now().UTC()
	r := RndInt64()
	_, err := db.Exec("UPDATE obj_job j SET j.pulled = ?, j.in_work = 1, j.seq_nr = ? WHERE j.is_done = 0 AND j.in_work = 0 AND j.schedule <= ? ORDER BY j.priority ASC, j.obj_id ASC LIMIT ?;", t, r, t, SQLJobSliceSize)
	logOnError(err, "loadCurrentJobs : update initial rows")
	if err != nil {
		return jobs, err
	}

	stmt, err := db.Prepare("SELECT o.id, o.obj_sub_type_id, j.trigger_id, j.user_id, j.zipped, j.payload, j.timeout FROM obj_job j, obj o WHERE j.obj_id = o.id AND j.is_done = 0 AND j.in_work = 1 AND j.seq_nr = ? ORDER BY j.priority ASC, j.obj_id ASC;")
	logOnError(err, "loadCurrentJobs : prepare select statement")
	if err != nil {
		stmt.Close()
		return jobs, err
	}
	rows, err := stmt.Query(r)
	// rows, err := stmt.Query(time.Now())
	logOnError(err, "loadCurrentJobs : query select statement")
	if err != nil {
		stmt.Close()
		return jobs, err
	}
	for rows.Next() {
		err = rows.Scan(&objId, &jobTypeID64, &trigger, &userID64, &zipped, &zpayload, &timeout)
		logOnError(err, "loadCurrentJobs : scan query rows")

		var payload []byte
		if zipped > 0 {
			zb := bytes.NewReader(zpayload)
			zr, err := zlib.NewReader(zb)
			if err != nil {
				logOnError(err, "loadCurrentJobs : zlib.NewReader")
				continue
			}
			b := new(bytes.Buffer)
			b.ReadFrom(zr)
			payload = b.Bytes()
		} else {
			payload = zpayload
		}
		job := Job{
			ID64:        objId,
			JobTypeID64: jobTypeID64,
			Trigger:     trigger,
			UserID64:    userID64,
			Timeout:     timeout,
			Payload:     payload,
		}

		jobs = append(jobs, job)
	}
	err = rows.Err()
	logOnError(err, "loadCurrentJobs : scan end rows")
	rows.Close()
	if err != nil {
		stmt.Close()
		return jobs, err
	}

	err = stmt.Close()
	logOnError(err, "loadCurrentJobs : close select statement")
	if err != nil {
		return jobs, err
	}
	return jobs, nil
}
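
// exampleDispatchJobs is a minimal sketch, an assumption rather than the scheduler actually
// used elsewhere in the bot, of how loadCurrentJobs is meant to be consumed: pull the due
// jobs and route each one to its handler based on the job subtype.
func exampleDispatchJobs() {
	jobs, err := loadCurrentJobs()
	logOnError(err, "exampleDispatchJobs : loadCurrentJobs")
	for _, j := range jobs {
		switch j.JobTypeID64 {
		case cacheObjSubType[`job_set_done`]:
			jobSetDone(j)
		case cacheObjSubType[`job_msg_del`]:
			jobMsgDelete(j)
		default:
			// the other handlers (jobRescan, jobPillage, jobGStock, ...) are wired the same way
		}
	}
}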

// jobRescan re-runs message identification over the messages returned by the query carried
// in the job payload, reporting progress back to the requesting chat.
func jobRescan(j Job) {
	var p JobPayloadRescanMsg

	err := setJobStart(j.ID64)
	logOnError(err, "jobRescan : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobRescan : Unmarshal payload")
	start := time.Now()
	milestone := time.Now()

	ids := getSQLListID64(p.Query)
	if len(ids) > 1 {
		txt := fmt.Sprintf("Rescanning %d messages.", len(ids))
		m := TGCommand{
			Type:         commandReplyMsg,
			Text:         txt,
			FromMsgID64:  p.MsgID64,
			FromChatID64: p.ChatID64,
		}
		TGCmdQueue <- m
		i := 0
		for _, id := range ids {
			SQLMsgIdentifyQueue <- id
			i = i + 1
			if time.Now().After(milestone.Add(1 * time.Minute)) {
				//txt := fmt.Sprintf("Rescanned %d/%d messages.", i, len(ids))
				m = TGCommand{
					Type:         commandReplyMsg,
					Text:         fmt.Sprintf("Rescanned %d/%d messages.", i, len(ids)),
					FromMsgID64:  p.MsgID64,
					FromChatID64: p.ChatID64,
				}
				TGCmdQueue <- m
				milestone = time.Now()
			}
		}

		r := JobPayloadSetDone{
			JobID64:  j.ID64,
			MsgID64:  p.MsgID64,
			ChatID64: p.ChatID64,
			Text:     fmt.Sprintf("%d messages processed in %s.", len(ids), time.Since(start)),
		}
		b, _ := json.Marshal(r)
		_, err := createJob(cacheObjSubType[`job_set_done`], objJobPriorityRescanAllMsg, j.UserID64, j.ID64, time.Now().UTC(), b)
		logOnError(err, "jobRescan : createJob(cacheObjSubType[`job_set_done`])")
	} else if len(ids) == 1 {
		SQLMsgIdentifyQueue <- ids[0]
		err = setJobDone(j.ID64)
		logOnError(err, "jobRescan : setJobDone(1)")
		if p.MsgID64 != 0 || p.ChatID64 != 0 {
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "One message processed.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
		}
	} else {
		err = setJobDone(j.ID64)
		logOnError(err, "jobRescan : setJobDone(0)")
		if p.MsgID64 != 0 || p.ChatID64 != 0 {
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "No message processed.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
		}
	}
	return
}
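
// exampleProgressTicker is a minimal sketch (an assumption, not a helper from the original
// code) of the progress-reporting pattern used by jobRescan above and jobBackupExport below:
// while a long loop runs, a reply is sent at most once per minute.
func exampleProgressTicker(total int, msgID64 int64, chatID64 int64, work func(i int)) {
	milestone := time.Now()
	for i := 0; i < total; i++ {
		work(i)
		if time.Now().After(milestone.Add(1 * time.Minute)) {
			TGCmdQueue <- TGCommand{
				Type:         commandReplyMsg,
				Text:         fmt.Sprintf("Processed %d/%d items.", i+1, total),
				FromMsgID64:  msgID64,
				FromChatID64: chatID64,
			}
			milestone = time.Now()
		}
	}
}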

func jobSetDone(j Job) {
	var r JobPayloadSetDone

	err := setJobStart(j.ID64)
	logOnError(err, "jobSetDone : setJobStart")

	err = json.Unmarshal(j.Payload, &r)
	logOnError(err, "jobSetDone : Unmarshal payload")

	err = setJobDone(r.JobID64)
	logOnError(err, "jobSetDone : setJobDone(child)")

	err = setJobDone(j.ID64)
	logOnError(err, "jobSetDone : setJobDone")

	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         r.Text,
		FromMsgID64:  r.MsgID64,
		FromChatID64: r.ChatID64,
	}
	TGCmdQueue <- m

	return
}

// jobPillage drives the pillage interception state machine: it first looks for an outcome
// message within 3m30 of the incoming pillage; if none is found and the job has not expired,
// it either waits for a recently sent "/go", or sends the counter action itself and
// reschedules to check again.
func jobPillage(j Job) {
	var r JobPayloadPillage
	err := setJobStart(j.ID64)
	logOnError(err, "jobPillage : setJobStart")
	err = json.Unmarshal(j.Payload, &r)
	logOnError(err, "jobPillage : Unmarshal payload")
	// check if we have an acknowledgment of go or a timeout within 3m30 of the PillageInc from the Job
	ids := getSQLListID64(`SELECT ox.id
		FROM obj ox
			, obj_msg omx
			, obj op
			, obj_msg omp
			, obj_job oj
		WHERE oj.obj_id = ` + strconv.FormatInt(j.ID64, 10) + `
			AND omx.user_id = oj.user_id
			AND omx.sender_user_id = ` + strconv.Itoa(userID64ChtWrsBot) + `
			AND omx.obj_id = ox.id
			AND ox.obj_sub_type_id in (` + strconv.FormatInt(cacheObjSubType[`msg_pillage_go`], 10) +
		`, ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_timeout`], 10) +
		`, ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_loss`], 10) +
		`, ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_win`], 10) + `)
			AND op.id = ` + strconv.FormatInt(r.ObjID64, 10) + `
			AND omp.obj_id = op.id
			AND omx.date between omp.date AND ADDTIME(omp.date, '0 0:3:30.000000')
		ORDER BY CASE ox.obj_sub_type_id WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_win`], 10) + ` THEN 0
			WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_loss`], 10) + ` THEN 1
			WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_timeout`], 10) + ` THEN 2
			WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_go`], 10) + ` THEN 3
			ELSE 4 END ASC
		LIMIT 1;`)
	if len(ids) > 1 { // issue there ?
		s := TGCommand{
			Type:       commandSendMsg,
			Text:       fmt.Sprintf("More than one outcome for pillage #%d", r.ObjID64),
			ToUserID64: j.UserID64,
		}
		TGCmdQueue <- s
	} else if len(ids) == 1 { // we've got a match, job is done whether we prevented the pillage or not
		m, err := getObjMsg(ids[0])
		logOnError(err, "jobPillage : getMsg(cacheObjSubType[`msg_pillage_go`], cacheObjSubType[`msg_pillage_timeout`], 10)")
		if err == nil {
			if m.Date.Add(60 * time.Second).After(time.Now().UTC()) {
				msgTypeID64, err := getObjSubTypeId(ids[0])
				logOnError(err, "jobPillage : getObjSubTypeId")
				if err == nil {
					if msgTypeID64 == cacheObjSubType[`msg_pillage_go`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We avoided a pillage (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else if msgTypeID64 == cacheObjSubType[`msg_pillage_win`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We avoided a pillage (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else if msgTypeID64 == cacheObjSubType[`msg_pillage_loss`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We got pillaged (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else if msgTypeID64 == cacheObjSubType[`msg_pillage_timeout`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We got pillaged (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We don't know what happened (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					}
				}
			}
		}
		err = setJobDone(j.ID64)
		logOnError(err, "jobPillage : setJobDone")
		return
	}
	// is the job outdated now ?
	if time.Now().UTC().After(r.Date.Add(time.Minute*3 + time.Second*30)) {
		// log.Printf("jobPillage :\n\tPillageTime : %s\n\tNowTime : %s\n", r.Date.Format(time.RFC3339), time.Now().UTC().Format(time.RFC3339))
		s := TGCommand{
			Type:       commandSendMsg,
			Text:       "Pillage interception expired",
			ToUserID64: j.UserID64,
		}
		TGCmdQueue <- s
		return
	}
	s := TGCommand{
		Type:       commandSendMsg,
		Text:       "No outcome for the pillage yet",
		ToUserID64: j.UserID64,
	}
	TGCmdQueue <- s
	//no outcome yet, have we sent a "/go" in the last 30 sec ?
	ids = getSQLListID64(`select ox.id
		from obj ox
			, obj_msg omx
			, obj_job oj
		where oj.obj_id = ` + strconv.FormatInt(j.ID64, 10) + `
			and omx.user_id = oj.user_id
			and omx.sender_user_id = oj.user_id
			and omx.obj_id = ox.id
			and ox.obj_sub_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_go`], 10) + `
			and omx.date between addtime(oj.schedule, '-30') and oj.schedule;`)
	if len(ids) > 0 { // we did, so we reschedule the job to check the outcome and wait
		m, err := getObjMsg(ids[0])
		logOnError(err, "jobPillage : getMsg")
		if err == nil {
			s := TGCommand{
				Type:       commandSendMsg,
				Text:       fmt.Sprintf("We started intercepting the pillage (%s)", m.Date.Format(time.RFC3339)),
				ToUserID64: j.UserID64,
			}
			TGCmdQueue <- s
		}
		err = rescheduleJob(j.ID64, j.Trigger, time.Now().Add(30*time.Second).UTC())
		logOnError(err, "jobPillage : rescheduleJob(cacheObjSubType[`msg_go`], 10)")
	} else { //no /go in the last 30 sec so we go ahead, send one and reschedule to check again in 30 sec
		t, err := getObjSubTypeId(r.ObjID64)
		logOnError(err, "jobPillage : getObjSubTypeId")
		m, err := getObjMsg(r.ObjID64)
		logOnError(err, "jobPillage : getObjMsg")
		if t == cacheObjSubType[`msg_pillage_inc`] {
			clientSendCWMsg(j.UserID64, "/go")
		} else if t == cacheObjSubType[`msg_pillage_inc2`] {
			if len(m.Callbacks) != 1 {
				s := TGCommand{
					Type:       commandSendMsg,
					Text:       fmt.Sprintf("More than one button (%s)", m.Date.Format(time.RFC3339)),
					ToUserID64: j.UserID64,
				}
				TGCmdQueue <- s
			} else {
				for _, c := range m.Callbacks {
					if c.Name == `🧹Intervene` {
						clientCallback(j.UserID64, m.ID64, m.ChatID64, c.Name, c.Data)
					}
				}
			}
		} else {
			s := TGCommand{
				Type:       commandSendMsg,
				Text:       fmt.Sprintf("Unknown pillage version (%s)", m.Date.Format(time.RFC3339)),
				ToUserID64: j.UserID64,
			}
			TGCmdQueue <- s
		}
		err = rescheduleJob(j.ID64, j.Trigger, time.Now().Add(30*time.Second).UTC())
		logOnError(err, "jobPillage : rescheduleJob")
	}
	return
}

func jobMsgRefresh(j Job) {
	var p JobPayloadMsgRefresh

	// identify whether the message has been properly refreshed ? create new job ? reschedule same job ?
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgRefresh : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgRefresh : Unmarshal payload")
	m, err := getObjMsg(p.ObjID64)
	if err != nil && strings.Compare(err.Error(), `sql: no rows in result set`) == 0 {
		err = setJobDone(j.ID64)
		logOnError(err, "jobMsgRefresh : setJobDone")
		return
	} else {
		logOnError(err, "jobMsgRefresh : getObjMsg")
		err = setJobDone(j.ID64)
		logOnError(err, "jobMsgRefresh : setJobDone")
		return
	}

	err = delObj(p.ObjID64)
	logOnError(err, "jobMsgRefresh : delObj")

	clientRefreshCWMsg(m.TGUserID64, m.ChatID64, m.ID64)

	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgRefresh : setJobDone")
	return
}

// jobMsgClient sends the payload text through the user's client (clientSendCWMsg) and
// confirms it back to the requesting chat.
func jobMsgClient(j Job) {
	var p JobPayloadMsgClient
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgClient : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgClient : Unmarshal payload")
	if err == nil {
		clientSendCWMsg(j.UserID64, p.Text)
		m := TGCommand{
			Type:         commandReplyMsg,
			Text:         "Message sent.",
			FromMsgID64:  p.MsgID64,
			FromChatID64: p.ChatID64,
		}
		TGCmdQueue <- m
	}

	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgClient : setJobDone")
	return
}
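
// exampleEnqueueClientMsg is a minimal sketch of how a jobMsgClient-style job could be
// enqueued. The `job_msg_client` subtype key below is hypothetical: the key actually used
// by the rest of the bot is not shown in this file.
func exampleEnqueueClientMsg(userID64 int64, text string, msgID64 int64, chatID64 int64) error {
	p := JobPayloadMsgClient{
		Text:     text,
		MsgID64:  msgID64,
		ChatID64: chatID64,
	}
	b, err := json.Marshal(p)
	if err != nil {
		return err
	}
	// `job_msg_client` is an assumed key, used here for illustration only
	_, err = createJob(cacheObjSubType[`job_msg_client`], objJobPriority, userID64, 0, time.Now().UTC(), b)
	return err
}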

func jobMsgFwd(j Job) {
	var p JobPayloadMsgFwd
	err := setJobStart(j.ID64)
	logOnError(err, "jobFwdMsg : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobFwdMsg : Unmarshal payload")
	msg, err := getObjMsg(j.Trigger)
	logOnError(err, "jobFwdMsg : getObjMsg msg")
	clientFwdCWMsg(j.UserID64, msg.ID64, msg.ChatID64, p.ChatID64)
	err = setJobDone(j.ID64)
	logOnError(err, "jobFwdMsg : setJobDone")
	return
}

// jobMsgDelete deletes a Telegram message, either immediately or, when a delay is set in the
// payload, by re-enqueueing itself with the delay cleared and the target message pinned in
// ObjMsgID64.
func jobMsgDelete(j Job) {
	var p JobPayloadMsgDel
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgDel : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgDel : Unmarshal payload")

	b, _ := json.Marshal(p)
	log.Printf("jobMsgDelete[%d] : %d : Payload => %s.\n", j.ID64, j.UserID64, string(b))

	if j.Trigger != 0 && p.MsgTypeID64 != 0 {
		logOnError(err, "jobMsgDel : getObjMsg msg")
		id, err := getObjSubTypeId(j.Trigger)
		logOnError(err, "jobMsgDelete : getObjSubTypeId("+strconv.FormatInt(j.Trigger, 10)+")")
		if id == p.MsgTypeID64 {
			if p.Delay == 0 {
				obj, err := getObjMsg(j.Trigger)
				logOnError(err, "jobMsgDelete : getObjMsg("+strconv.FormatInt(j.Trigger, 10)+")")
				clientDelTGMsg(j.UserID64, obj.ID64, obj.ChatID64)
			} else {
				delay := p.Delay
				p.Delay = 0
				p.ObjMsgID64 = j.Trigger
				b, _ := json.Marshal(p)
				_, err = createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().Add(delay).UTC(), b)
			}
		} else {
			log.Printf("jobMsgDelete : cannot identify msg to delete")
		}
	} else if p.ObjMsgID64 != 0 {
		if p.Delay == 0 {
			obj, err := getObjMsg(p.ObjMsgID64)
			logOnError(err, "jobMsgDelete : getObjMsg("+strconv.FormatInt(p.ObjMsgID64, 10)+")")
			clientDelTGMsg(j.UserID64, obj.ID64, obj.ChatID64)
		} else {
			delay := p.Delay
			p.Delay = 0
			b, _ := json.Marshal(p)
			_, err = createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().Add(delay).UTC(), b)
		}
	}
	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgDel : setJobDone")
	return
}

// jobBackupExport dumps every stored message to a zipped JSON file and sends it back as a
// Telegram document, reporting progress along the way.
func jobBackupExport(j Job) {
	var p JobPayloadBackupExport
	err := setJobStart(j.ID64)
	logOnError(err, "jobBackupExport : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobBackupExport : Unmarshal payload")
	bkp := DataBackup{}
	start := time.Now()
	milestone := time.Now()
	s := new([]ChatWarsMessage)
	msgs := *s
	ids := getSQLListID64(`SELECT om.obj_id id FROM obj_msg om;`)
	txt := fmt.Sprintf("Backing up %d messages.", len(ids))
	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         txt,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m

	i := 0
	for _, id := range ids {
		msg, err := getObjMsg(id)
		logOnError(err, "jobBackupExport : getMsg")
		if err == nil {
			msgs = append(msgs, *msg)
		}
		i = i + 1
		if time.Now().After(milestone.Add(1 * time.Minute)) {
			txt := fmt.Sprintf("Exported %d/%d messages.", i, len(ids))
			m = TGCommand{
				Type:         commandReplyMsg,
				Text:         txt,
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
			milestone = time.Now()
		}
	}

	bkp.Messages = msgs
	b, err := json.Marshal(bkp)
	logOnError(err, "jobBackupExport : Marshal")
	m = TGCommand{
		Type:         commandReplyMsg,
		Text:         `Compressing archive`,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	zbuf := new(bytes.Buffer)
	zw := zip.NewWriter(zbuf)
	zf, err := zw.Create(`backup.json`)
	logOnError(err, "jobBackupExport : Create")
	_, err = zf.Write(b)
	logOnError(err, "jobBackupExport : Write")
	err = zw.Close()
	logOnError(err, "jobBackupExport : Close")
	d := tb.Document{}
	d.File = tb.FromReader(bytes.NewReader(zbuf.Bytes()))
	d.FileName = fmt.Sprintf("%s.backup.zip", start.Format("20060102150405"))
	d.Caption = d.FileName
	d.MIME = `application/zip`
	m = TGCommand{
		Type:         commandReplyMsg,
		Text:         `Export done.`,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	m = TGCommand{
		Type:       commandSendDocument,
		Document:   d,
		ToChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	err = setJobDone(j.ID64)
	logOnError(err, "jobBackupExport : setJobDone")
	return
}

// jobBackupImport downloads a zipped backup, extracts backup.json and replays every message
// through the processing queue.
func jobBackupImport(j Job) {
	var p JobPayloadBackupImport
	err := setJobStart(j.ID64)
	logOnError(err, "jobBackupImport : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobBackupImport : Unmarshal payload")

	resp, err := http.Get(p.URL)
	logOnError(err, "jobBackupImport : Get")
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         "File downloaded.",
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	z := buf.Bytes()
	r := bytes.NewReader(z)
	zr, err := zip.NewReader(r, int64(len(z)))
	for _, f := range zr.File {
		if strings.Compare(f.Name, "backup.json") == 0 {
			rc, err := f.Open()
			logOnError(err, "jobBackupImport : Open")
			if err != nil {
				return
			}
			data, err := ioutil.ReadAll(rc)
			logOnError(err, "jobBackupImport : ReadAll")
			if err != nil {
				return
			}
			log.Printf("jobBackupImport : %d uncompressed bytes.\n", len(data))
			rc.Close()
			bkp := DataBackup{}
			err = json.Unmarshal(data, &bkp)
			logOnError(err, "jobBackupImport : Unmarshal")
			if err != nil {
				return
			}
			for _, msg := range bkp.Messages {
				MQCWMsgQueue <- msg
			}
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "Backup restored.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
			err = setJobDone(j.ID64)
			logOnError(err, "jobBackupImport : setJobDone")
			return
		}
	}
	m = TGCommand{
		Type:         commandReplyMsg,
		Text:         "No backup file found in archive.",
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	err = setJobDone(j.ID64)
	logOnError(err, "jobBackupImport : setJobDone")
	return
}

// jobGStock reports the guild stock usage by category. It runs in two passes: on the first
// pass it spawns a `job_get_vault` child job and parks itself; on the second pass it reads
// the vault content from the child's payload and sends the summary.
func jobGStock(j Job) {
	var (
		p                     JobPayloadGStock
		p2                    JobPayloadGetVault
		b                     []byte
		resSize, resCount     int64
		alchSize, alchCount   int64
		miscSize, miscCount   int64
		recSize, recCount     int64
		partSize, partCount   int64
		otherSize, otherCount int64
		totalSize             int64
	)

	err := setJobStart(j.ID64)
	logOnError(err, "jobGStock : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGStock : Unmarshal payload")

	if p.Status == 0 {
		p2.JobCallbackID64 = j.ID64
		p2.ItemTypeList = make([]int64, 0)
		p2.ItemTypeList = append(p2.ItemTypeList, cacheObjSubType[`item_res`])
		p2.ItemTypeList = append(p2.ItemTypeList, cacheObjSubType[`item_alch`])
		p2.ItemTypeList = append(p2.ItemTypeList, cacheObjSubType[`item_misc`])
		p2.ItemTypeList = append(p2.ItemTypeList, cacheObjSubType[`item_recipe`])
		p2.ItemTypeList = append(p2.ItemTypeList, cacheObjSubType[`item_part`])
		p2.ItemTypeList = append(p2.ItemTypeList, cacheObjSubType[`item_other`])
		b, err = json.Marshal(p2)
		logOnError(err, "jobGStock : Marshal(p2)")
		jobID64, err := createJob(cacheObjSubType[`job_get_vault`], objJobPriority, j.UserID64, 0, time.Now().UTC(), b)
		p.Status = 1
		p.VaultJobID64 = jobID64
		b, err = json.Marshal(p)
		logOnError(err, "jobGStock : Marshal(p)")
		err = setJobPayloadJSON(j.ID64, p)
		logOnError(err, "jobGStock : setJobPayloadJSON(p)")
		rescheduleJob(j.ID64, 0, time.Unix(maxUnixTimestamp, 0).UTC())
		return
	}
	b = getJobPayload(p.VaultJobID64)
	err = json.Unmarshal(b, &p2)
	logOnError(err, "jobGStock : Unmarshal(p2)")
	for _, v := range p2.Vault {
		item, err := getObjItem(v.ItemID64)
		logOnError(err, "jobGStock : getObjItem")
		if err == nil {
			if item.Weight != -1 {
				totalSize += item.Weight * v.Quantity
				switch item.ItemTypeID {
				case cacheObjSubType[`item_res`]:
					resSize += item.Weight * v.Quantity
					resCount += v.Quantity
				case cacheObjSubType[`item_alch`]:
					alchSize += item.Weight * v.Quantity
					alchCount += v.Quantity
				case cacheObjSubType[`item_misc`]:
					miscSize += item.Weight * v.Quantity
					miscCount += v.Quantity
				case cacheObjSubType[`item_recipe`]:
					recSize += item.Weight * v.Quantity
					recCount += v.Quantity
				case cacheObjSubType[`item_part`]:
					partSize += item.Weight * v.Quantity
					partCount += v.Quantity
				case cacheObjSubType[`item_other`]:
					otherSize += item.Weight * v.Quantity
					otherCount += v.Quantity
				}
			} else {
				w := TGCommand{
					Type:       commandSendMsg,
					Text:       fmt.Sprintf("Unknown weight for item : %s - %s\n", item.Code, item.Names[0]),
					ToUserID64: cfg.Bot.Admin,
				}
				TGCmdQueue <- w
			}
		}
	}

	txt := fmt.Sprintf("<code>Current stock [%d/38000] :\n - Resources : %d (%d)\n - Alchemist : %d (%d)\n - Misc stuff : %d (%d)\n - Recipes : %d (%d)\n - Parts : %d (%d)\n - Other : %d (%d)</code>\n", totalSize, resSize, resCount, alchSize, alchCount, miscSize, miscCount, recSize, recCount, partSize, partCount, otherSize, otherCount)

	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         txt,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
		ParseMode:    cmdParseModeHTML,
	}
	TGCmdQueue <- m
	err = setJobDone(j.ID64)
	logOnError(err, "jobGStock : setJobDone")
	return
}
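
// The sketch below is an assumption about how the `job_get_vault` child job completes (its
// handler is not part of this file): it would store the collected vault in its own payload
// and wake the caller recorded in JobCallbackID64, which then runs its second pass and reads
// that payload back with getJobPayload, as jobGStock does above.
func exampleGetVaultCompletion(j Job, p2 JobPayloadGetVault) {
	err := setJobPayloadJSON(j.ID64, p2)
	logOnError(err, "exampleGetVaultCompletion : setJobPayloadJSON")
	err = rescheduleJob(p2.JobCallbackID64, j.ID64, time.Now().UTC())
	logOnError(err, "exampleGetVaultCompletion : rescheduleJob")
	err = setJobDone(j.ID64)
	logOnError(err, "exampleGetVaultCompletion : setJobDone")
}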

// jobGDepositForward checks whether the deposit request message that triggered it matches
// the item and quantity in its payload; if so the message is queued for forwarding,
// otherwise the job parks itself again until the next msg_g_deposit_req message.
func jobGDepositForward(j Job) {
	var p JobPayloadGDepositForward
	err := setJobStart(j.ID64)
	logOnError(err, "jobGDepositForward : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGDepositForward : Unmarshal payload")
	msg, err := getObjMsg(j.Trigger)
	logOnError(err, "jobGDepositForward : getObjMsg")
	rule, err := getMsgParsingRule(msg)
	logOnError(err, "jobGDepositForward : getMsgParsingRule")
	cwm, err := parseSubTypeMessageGDepositReq(msg, rule.re)
	if cwm.ItemID64 == p.ItemID64 && cwm.Quantity == p.Quantity {
		//log.Printf("jobGDepositForward : match (%d / %d).\n", cwm.ItemID64, cwm.Quantity)
		gDepositForwardMux.Lock()
		gDepositForwardMsg = append(gDepositForwardMsg, j.Trigger)
		gDepositForwardMux.Unlock()
		err = setJobDone(j.ID64)
		logOnError(err, "jobGDepositForward : setJobDone")
	} else {
		//log.Printf("jobGDepositForward : found (%d / %d), expected (%d / %d).\n", cwm.ItemID64, cwm.Quantity, p.ItemID64, p.Quantity)
		err = rescheduleJob(j.ID64, 0, time.Unix(maxUnixTimestamp, 0).UTC())
		logOnError(err, "jobGDepositForward : rescheduleJob")
		setJobCallback(j.ID64, j.UserID64, cacheObjSubType[`msg_g_deposit_req`])
	}
	return
}

// jobGDeposit walks a user's stock back into the guild vault. With Status == 0 it opens the
// relevant stock categories and registers callbacks on the expected stock messages; on later
// passes it parses the stock listing that triggered it and issues the matching /g_deposit
// commands, each one backed by a jobGDepositForward callback.
func jobGDeposit(j Job) {
	var p JobPayloadGDeposit
	err := setJobStart(j.ID64)
	logOnError(err, "jobGDeposit : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGDeposit : Unmarshal payload")
	if p.Status == 0 { /* handle remaining resources to be stored */
		var res, misc, alch, craft, equip bool = false, false, false, false, false
		var delay time.Duration = 0 * time.Second
		var b []byte
		if len(p.ResObjID64) > 0 {
			for i := range p.ResObjID64 {
				obj, err := getObjItem(p.ResObjID64[i])
				logOnError(err, "jobGDeposit : getObjItem")
				if err == nil {
					switch obj.ItemTypeID {
					case cacheObjSubType[`item_res`]:
						res = true
					case cacheObjSubType[`item_alch`]:
						alch = true
					case cacheObjSubType[`item_misc`]:
						misc = true
					case cacheObjSubType[`item_recipe`]:
						craft = true
					case cacheObjSubType[`item_part`]:
						craft = true
					case cacheObjSubType[`item_other`]:
						equip = true
					case cacheObjSubType[`item_unique`]:
						equip = true
					default:
					}
				}
			}
		}

		if res {
			clientSendCWMsgDelay(p.ChatID64, `📦Resources`, delay)
			p.Status = cacheObjSubType[`msg_stock_ack`]
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_stock_ack`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if alch {
			clientSendCWMsgDelay(p.ChatID64, `⚗️ Alchemy`, delay)
			p.Status = 1 // FIXME UPDATE WITH PROPER TYPE
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_orderbook_acl`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if misc {
			clientSendCWMsgDelay(p.ChatID64, `🗃Misc`, delay)
			p.Status = 1 // FIXME UPDATE WITH PROPER TYPE
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_orderbook_acl`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if craft {
			clientSendCWMsgDelay(p.ChatID64, `⚒Crafting`, delay)
			p.Status = cacheObjSubType[`msg_stock_any_ack`]
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_stock_any_ack`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if equip {
			clientSendCWMsgDelay(p.ChatID64, `🏷Equipment`, delay)
			p.Status = 1 // FIXME UPDATE WITH PROPER TYPE
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_orderbook_acl`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}

		return
	} else if p.Status == 1 { /* handle that one resource from the cacheObjSubType[`msg_orderbook_acl`] msg */
		log.Printf("jobGDeposit : 1 : %d.\n", j.Trigger)

	} else if p.Status == cacheObjSubType[`msg_stock_ack`] {
		//log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_ack`] : %d.\n", j.Trigger)
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGDeposit : getObjMsg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGDeposit : getMsgParsingRule")
		cwm, err := parseSubTypeMessageStockAck(msg, rule.re)
		for stockIdx := range cwm.Stock {
			for resIdx := range p.ResObjID64 {
				if cwm.Stock[stockIdx].ItemID64 == p.ResObjID64[resIdx] {
					//log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_ack`] : Matching ItemID %d (%d).\n", p.ResObjID64[resIdx], cwm.Stock[stockIdx].Quantity)
					item, _ := getObjItem(p.ResObjID64[resIdx])
					clientSendCWMsg(p.ChatID64, fmt.Sprintf("/g_deposit %s %d", item.Code, cwm.Stock[stockIdx].Quantity))
					p2 := JobPayloadGDepositForward{
						ItemID64: p.ResObjID64[resIdx],
						Quantity: cwm.Stock[stockIdx].Quantity,
					}
					b2, _ := json.Marshal(p2)
					err = createJobCallback(cacheObjSubType[`job_gdeposit_fwd`], j.UserID64, cacheObjSubType[`msg_g_deposit_req`], b2, time.Duration(1*time.Minute))
				}
			}
		}
	} else if p.Status == cacheObjSubType[`msg_stock_any_ack`] {
		log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_any_ack`] : %d.\n", j.Trigger)
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGDeposit : getObjMsg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGDeposit : getMsgParsingRule")
		cwm, err := parseSubTypeMessageStockAnyAck(msg, rule.re)
		for stockIdx := range cwm.Stock {
			for resIdx := range p.ResObjID64 {
				if cwm.Stock[stockIdx].ItemID64 == p.ResObjID64[resIdx] {
					log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_any_ack`] : Matching ItemID %d (%d).\n", p.ResObjID64[resIdx], cwm.Stock[stockIdx].Quantity)
					item, _ := getObjItem(p.ResObjID64[resIdx])
					clientSendCWMsg(p.ChatID64, fmt.Sprintf("/g_deposit %s %d", item.Code, cwm.Stock[stockIdx].Quantity))
					p2 := JobPayloadGDepositForward{
						ItemID64: p.ResObjID64[resIdx],
						Quantity: cwm.Stock[stockIdx].Quantity,
					}
					b2, _ := json.Marshal(p2)
					err = createJobCallback(cacheObjSubType[`job_gdeposit_fwd`], j.UserID64, cacheObjSubType[`msg_g_deposit_req`], b2, time.Duration(1*time.Minute))
				}
			}
		}
	}
	err = setJobDone(j.ID64)
	logOnError(err, "jobGDeposit : setJobDone")
	return
}

// jobVaultItemStatus reports, per item, who deposited and withdrew how much in the deposit
// chat, based on the obj_msg_vault_v view.
func jobVaultItemStatus(j Job) {
	var (
		p                                   JobPayloadVaultItemStatus
		itemID64, currentItemID64           int64
		user, deposit, withdraw             int64
		userList, depositList, withdrawList []int64
	)
	err := setJobStart(j.ID64)
	logOnError(err, "jobVaultItemStatus : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobVaultItemStatus : Unmarshal payload")
	stmt := `SELECT x.item_id
			, x.user_id
			, (SELECT COALESCE(SUM(omv.quantity), 0)
				FROM obj_msg_vault_v omv
				WHERE omv.user_id = x.user_id
					AND omv.item_id = x.item_id
					AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_g_deposit_ack`], 10) + `
					AND omv.chat_id = x.chat_id) deposit
			, (SELECT COALESCE(SUM(omv.quantity), 0)
				FROM obj_msg_vault_v omv
				WHERE omv.user_id = x.user_id
					AND omv.item_id = x.item_id
					AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_withdraw_rcv`], 10) + `
					AND omv.chat_id = x.chat_id) withdraw
		FROM (SELECT DISTINCT
				omv.user_id
				, omv.chat_id
				, omv.item_id
			FROM obj_msg_vault_v omv
			WHERE omv.chat_id = ?
				AND omv.item_id in (?` + strings.Repeat(",?", len(p.ItemListID64)-1) + `)) x
		ORDER BY x.user_id ASC;`
	args := make([]interface{}, len(p.ItemListID64)+1)
	args[0] = p.DepositChatID64
	for i, id := range p.ItemListID64 {
		args[i+1] = id
	}
	rows, err := db.Query(stmt, args...)
	logOnError(err, "jobVaultItemStatus : Get rows")
	if err != nil {
		err = setJobDone(j.ID64)
		logOnError(err, "jobVaultItemStatus : setJobDone")
		return
	}
	currentItemID64 = 0
	for rows.Next() {
		err = rows.Scan(&itemID64, &user, &deposit, &withdraw)
		logOnError(err, "jobVaultItemStatus : scan next val")
		if itemID64 != currentItemID64 {
			if currentItemID64 != 0 {
				// display info
				out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `User`)
				for i, userId := range userList {
					logOnError(err, "jobVaultItemStatus : getObjItem")
					chat, err := bot.ChatByID(strconv.FormatInt(userId, 10))
					logOnError(err, "jobVaultItemStatus : ChatByID")
					if err == nil {
						out = fmt.Sprintf("%s@%-31s |%6d |%6d |%6d\n", out, chat.Username, depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
					} else {
						out = fmt.Sprintf("%s#%-31d |%6d |%6d |%6d\n", out, userId, depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
					}
				}
				out = fmt.Sprintf("%s</code>", out)
				c := TGCommand{
					Type:       commandSendMsg,
					Text:       out,
					ToChatID64: p.UserID64,
					ParseMode:  cmdParseModeHTML,
				}
				TGCmdQueue <- c
			}
			currentItemID64 = itemID64
			userList = nil
			depositList = nil
			withdrawList = nil
		}
		userList = append(userList, user)
		depositList = append(depositList, deposit)
		withdrawList = append(withdrawList, withdraw)
	}
	if currentItemID64 != 0 {
		// display info
		out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `User`)
		for i, userId := range userList {
			logOnError(err, "jobVaultItemStatus : getObjItem")
			chat, err := bot.ChatByID(strconv.FormatInt(userId, 10))
			logOnError(err, "jobVaultItemStatus : ChatByID")
			if err == nil {
				out = fmt.Sprintf("%s@%-31s |%6d |%6d |%6d\n", out, chat.Username, depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
			} else {
				out = fmt.Sprintf("%s#%-31d |%6d |%6d |%6d\n", out, userId, depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
			}
		}
		out = fmt.Sprintf("%s</code>", out)
		c := TGCommand{
			Type:       commandSendMsg,
			Text:       out,
			ToChatID64: p.UserID64,
			ParseMode:  cmdParseModeHTML,
		}
		TGCmdQueue <- c
	} else {
		c := TGCommand{
			Type:       commandSendMsg,
			Text:       "Nothing to report",
			ToChatID64: p.UserID64,
			ParseMode:  cmdParseModeHTML,
		}
		TGCmdQueue <- c
	}
	err = rows.Err()
	logOnError(err, "jobVaultItemStatus : query end")
	rows.Close()
	err = setJobDone(j.ID64)
	logOnError(err, "jobVaultItemStatus : setJobDone")
	return
}

// jobVaultUserStatus reports, per user, the items deposited and withdrawn in the deposit
// chat, based on the obj_msg_vault_v view.
func jobVaultUserStatus(j Job) {
	var (
		p                                   JobPayloadVaultUserStatus
		userID64, currentUserID64           int64
		itemID64, deposit, withdraw         int64
		itemList, depositList, withdrawList []int64
	)

	err := setJobStart(j.ID64)
	logOnError(err, "jobVaultUserStatus : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobVaultUserStatus : Unmarshal payload")

	stmt := `SELECT x.user_id
			, x.item_id
			, (SELECT COALESCE(SUM(omv.quantity), 0)
				FROM obj_msg_vault_v omv
				WHERE omv.user_id = x.user_id
					AND omv.item_id = x.item_id
					AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_g_deposit_ack`], 10) + `
					AND omv.chat_id = x.chat_id) deposit
			, (SELECT COALESCE(SUM(omv.quantity), 0)
				FROM obj_msg_vault_v omv
				WHERE omv.user_id = x.user_id
					AND omv.item_id = x.item_id
					AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_withdraw_rcv`], 10) + `
					AND omv.chat_id = x.chat_id) withdraw
		FROM (SELECT DISTINCT
				omv.user_id
				, omv.chat_id
				, omv.item_id
			FROM obj_msg_vault_v omv
			WHERE omv.chat_id = ?
				AND omv.user_id IN (?` + strings.Repeat(",?", len(p.UserListID64)-1) + `)
				AND omv.item_type_id IN (?` + strings.Repeat(",?", len(p.ItemTypeListID64)-1) + `)) x
		ORDER BY x.user_id ASC
			, (SELECT oi.intl_id FROM obj_item oi WHERE oi.obj_id = x.item_id) ASC;`

	args := make([]interface{}, len(p.UserListID64)+len(p.ItemTypeListID64)+1)
	args[0] = p.DepositChatID64
	for i, id := range p.UserListID64 {
		args[i+1] = id
	}
	for i, id := range p.ItemTypeListID64 {
		args[i+1+len(p.UserListID64)] = id
	}

	rows, err := db.Query(stmt, args...)
	logOnError(err, "jobVaultUserStatus : Get rows")
	if err != nil {
		err = setJobDone(j.ID64)
		logOnError(err, "jobVaultUserStatus : setJobDone")
		return
	}

	currentUserID64 = 0
	for rows.Next() {
		err = rows.Scan(&userID64, &itemID64, &deposit, &withdraw)
		logOnError(err, "jobVaultUserStatus : scan next val")
		if userID64 != currentUserID64 {
			if currentUserID64 != 0 {
				// display info
				out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `Item`)
				for i, itemId := range itemList {
					item, err := getObjItem(itemId)
					logOnError(err, "jobVaultUserStatus : getObjItem")
					out = fmt.Sprintf("%s%-32s |%6d |%6d |%6d\n", out, item.Names[0], depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
				}
				out = fmt.Sprintf("%s</code>", out)
				c := TGCommand{
					Type:       commandSendMsg,
					Text:       out,
					ToChatID64: p.UserID64,
					ParseMode:  cmdParseModeHTML,
				}
				TGCmdQueue <- c
			}
			currentUserID64 = userID64
			itemList = nil
			depositList = nil
			withdrawList = nil
		}

		itemList = append(itemList, itemID64)
		depositList = append(depositList, deposit)
		withdrawList = append(withdrawList, withdraw)
	}
	if currentUserID64 != 0 {
		//display info
		out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `Item`)
		for i, itemId := range itemList {
			item, err := getObjItem(itemId)
			logOnError(err, "jobVaultUserStatus : getObjItem")
			out = fmt.Sprintf("%s%-32s |%6d |%6d |%6d\n", out, item.Names[0], depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
		}
		out = fmt.Sprintf("%s</code>", out)

		c := TGCommand{
			Type:       commandSendMsg,
			Text:       out,
			ToChatID64: p.UserID64,
			ParseMode:  cmdParseModeHTML,
		}
		TGCmdQueue <- c
	}
	err = rows.Err()
	logOnError(err, "jobVaultUserStatus : query end")
	rows.Close()
	err = setJobDone(j.ID64)
	logOnError(err, "jobVaultUserStatus : setJobDone")
	return
}
2019-10-04 12:38:03 +02:00
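// jobGWithdraw walks a guild withdrawal request through the states kept in its
// payload: Status 0 spawns a job_get_vault job to snapshot the guild vault,
// Status 1 matches the request against that snapshot and inspects unknown
// unique items (codes matching ^u[0-9]+$), and the final stage posts a
// validation link in the main chat that someone other than the requester must
// click before the withdrawal goes ahead.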
func jobGWithdraw ( j Job ) {
2020-01-06 04:58:46 +01:00
var (
2020-01-30 05:41:47 +01:00
p JobPayloadGWithdraw
p2 JobPayloadGetVault
b [ ] byte
2020-01-28 10:54:26 +01:00
vault map [ string ] int64
2020-01-06 04:58:46 +01:00
)
2020-01-30 05:41:47 +01:00
log . Printf ( "jobGWithdraw[%d] : Starting handling job.\n" , j . ID64 )
2020-01-13 09:42:26 +01:00
2019-10-04 12:38:03 +02:00
err := setJobStart ( j . ID64 )
logOnError ( err , "jobGWithdraw : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobGWithdraw : Unmarshal payload" )
2020-01-05 07:23:36 +01:00
if p . Status == 0 {
2020-01-27 05:05:38 +01:00
p2 . JobCallbackID64 = j . ID64
p2 . ItemTypeList = make ( [ ] int64 , 0 )
2020-01-21 08:40:32 +01:00
for k , item := range p . Items {
2020-01-14 04:34:29 +01:00
id := getSilentObjItemID ( item . Code , ` ` )
2020-01-05 07:23:36 +01:00
if id != 0 {
obj , _ := getObjItem ( id )
2020-01-27 05:05:38 +01:00
p2 . ItemTypeList = append ( p2 . ItemTypeList , obj . ItemTypeID )
2020-01-14 04:38:59 +01:00
} else if ok , _ := regexp . MatchString ( ` ^u[0-9]+$ ` , item . Code ) ; ok {
2020-01-21 08:40:32 +01:00
p . Items [ k ] . Inspect = true
2020-01-27 05:05:38 +01:00
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_other ` ] )
2020-01-05 07:23:36 +01:00
}
}
2020-01-06 04:58:46 +01:00
2020-01-27 05:05:38 +01:00
b , err = json . Marshal ( p2 )
logOnError ( err , "jobGWithdraw : Marshal(p2)" )
jobID64 , err := createJob ( cacheObjSubType [ ` job_get_vault ` ] , objJobPriority , j . UserID64 , 0 , time . Now ( ) . UTC ( ) , b )
logOnError ( err , "jobGWithdraw : createJob" )
p . Status = 1
p . VaultJobID64 = jobID64
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON(p)" )
2020-01-30 05:41:47 +01:00
log . Printf ( "jobGWithdraw[%d] : Calling GetVault job.\n" , j . ID64 )
2020-01-27 05:05:38 +01:00
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
return
} else if p . Status == 1 {
/* loop through items and get unique/inspect */
2020-01-28 10:54:26 +01:00
b = getJobPayload ( p . VaultJobID64 )
err = json . Unmarshal ( b , & p2 )
logOnError ( err , "jobGWithdraw : Unmarshal(p2)" )
2020-01-30 05:41:47 +01:00
2020-01-28 10:54:26 +01:00
vault = make ( map [ string ] int64 )
for _ , i := range p2 . Vault {
vault [ i . Code ] = i . Quantity
}
for k , req := range p . Items {
ava , _ := vault [ req . Code ]
p . Items [ k ] . Available = ava
2020-01-30 05:41:47 +01:00
if ok , _ := regexp . MatchString ( ` ^u[0-9]+$ ` , req . Code ) ; ok {
if ava > 0 {
p . Items [ k ] . Inspect = false
} else if p . Inspecting == ` ` {
p . Inspecting = req . Code
}
}
}
p . Status = 2
err = setJobPayloadJSON ( j . ID64 , p )
log . Printf ( "jobGWithdraw[%d] : received GetVault job.\n" , j . ID64 )
if p . Inspecting != ` ` {
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_g_inspect_req ` ] )
setJobCallback ( j . ID64 , int64 ( bot . Me . ID ) , cacheObjSubType [ ` msg_inspect_ack ` ] )
setJobCallback ( j . ID64 , int64 ( bot . Me . ID ) , cacheObjSubType [ ` msg_invalid_action ` ] )
clientSendCWMsg ( j . UserID64 , fmt . Sprintf ( "/g_inspect_%s" , p . Inspecting ) )
log . Printf ( "jobGWithdraw[%d] : Inspecting missing unique item.\n" , j . ID64 )
return
2020-01-28 10:54:26 +01:00
}
2020-01-05 07:23:36 +01:00
}
2020-01-05 08:27:07 +01:00
2020-01-13 09:59:00 +01:00
if j . Trigger != 0 {
2020-01-13 09:59:28 +01:00
id , err := getObjSubTypeId ( j . Trigger )
2020-01-13 09:59:00 +01:00
logOnError ( err , "jobGWithdraw : getObjSubType(" + strconv . FormatInt ( j . Trigger , 10 ) + ")" )
2020-01-17 05:24:07 +01:00
if err == nil {
2020-01-30 05:41:47 +01:00
m , err := getObjMsg ( j . Trigger )
logOnError ( err , "jobGWithdraw : getObjMsg" )
rule , err := getMsgParsingRule ( m )
logOnError ( err , "jobGWithdraw : getMsgParsingRule" )
p . CleanupMsg = append ( p . CleanupMsg , * m )
switch id {
case cacheObjSubType [ ` msg_g_inspect_req ` ] :
log . Printf ( "jobGWithdraw[%d] : Deleting unique inspect req.\n" , j . ID64 )
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON" )
err = rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
logOnError ( err , "jobGWithdraw : rescheduleJob" )
case cacheObjSubType [ ` msg_inspect_ack ` ] :
log . Printf ( "jobGWithdraw[%d] : Handling unique inspect ack.\n" , j . ID64 )
2020-01-22 10:07:14 +01:00
cwm , err := parseSubTypeMessageInspectAck ( m , rule . re )
logOnError ( err , "jobGWithdraw : parseSubTypeMessageInspectAck" )
for k , req := range p . Items {
if req . Code == p . Inspecting {
p . Items [ k ] . Available = 1
p . Items [ k ] . Name = cwm . Name
p . Items [ k ] . Inspect = false
break
}
}
p . Inspecting = ` `
2020-01-14 04:34:29 +01:00
2020-01-30 05:41:47 +01:00
for k , req := range p . Items {
2020-01-22 10:07:14 +01:00
if req . Inspect {
p . Inspecting = req . Code
2020-01-30 05:41:47 +01:00
p . Items [ k ] . Inspect = false
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON" )
err = rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
logOnError ( err , "jobGWithdraw : rescheduleJob" )
2020-01-22 10:07:14 +01:00
2020-01-30 05:41:47 +01:00
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_g_inspect_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_invalid_action ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_inspect_ack ` ] )
2020-01-22 10:07:14 +01:00
clientSendCWMsg ( j . UserID64 , fmt . Sprintf ( "/g_inspect_%s" , p . Inspecting ) )
2020-01-30 05:41:47 +01:00
2020-01-22 10:07:14 +01:00
return
}
}
2020-01-30 05:41:47 +01:00
case cacheObjSubType [ ` msg_invalid_action ` ] :
log . Printf ( "jobGWithdraw[%d] : Handling invalid unique item.\n" , j . ID64 )
2020-01-22 10:07:14 +01:00
for k , req := range p . Items {
if req . Code == p . Inspecting {
2020-01-30 05:41:47 +01:00
p . Items [ k ] . Available = 0
2020-01-22 10:07:14 +01:00
p . Items [ k ] . Inspect = false
break
}
}
p . Inspecting = ` `
2020-01-14 04:34:29 +01:00
2020-01-30 05:41:47 +01:00
for k , req := range p . Items {
2020-01-23 07:09:07 +01:00
if req . Inspect {
p . Inspecting = req . Code
2020-01-30 05:41:47 +01:00
p . Items [ k ] . Inspect = false
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON" )
err = rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
logOnError ( err , "jobGWithdraw : rescheduleJob" )
2020-01-23 07:09:07 +01:00
2020-01-30 05:41:47 +01:00
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_g_inspect_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_invalid_action ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_inspect_ack ` ] )
2020-01-23 07:09:07 +01:00
clientSendCWMsg ( j . UserID64 , fmt . Sprintf ( "/g_inspect_%s" , p . Inspecting ) )
return
}
}
2020-01-30 05:41:47 +01:00
case cacheObjSubType [ ` msg_job_gwithdraw_ack ` ] :
log . Printf ( "jobGWithdraw[%d] : Handling withdrawal validation ack.\n" , j . ID64 )
2020-01-17 05:24:07 +01:00
cwm , err := parseSubTypeMessageJobGWithdrawAck ( m , rule . re )
logOnError ( err , "jobGWithdraw : parseSubTypeMessageJobGWithdrawAck" )
in , err := hex . DecodeString ( cwm . Ref )
logOnError ( err , "msgJobGWithdrawAck : DecodeString" )
keyHash := sha256 . Sum256 ( [ ] byte ( cfg . Telegram . Token ) )
sha128 := keyHash [ : aes . BlockSize ]
c , err := aes . NewCipher ( sha128 )
logOnError ( err , "jobGWithdraw : aes.NewCipher" )
out := make ( [ ] byte , len ( in ) )
c . Decrypt ( out , in )
2020-01-30 05:42:29 +01:00
jobID64 := int64 ( binary . LittleEndian . Uint64 ( out [ : 8 ] ) )
userID64 := int64 ( binary . LittleEndian . Uint64 ( out [ 8 : 16 ] ) )
2020-01-17 05:24:07 +01:00
if jobID64 == j . ID64 {
if userID64 == cwm . Msg . TGSenderUserID64 {
cmd := TGCommand {
Type : commandReplyMsg ,
Text : "You cannot validate your own withdrawal" ,
2020-01-17 05:24:49 +01:00
FromMsgID64 : cwm . Msg . ID64 ,
2020-01-17 05:24:07 +01:00
FromChatID64 : cwm . Msg . ChatID64 ,
}
TGCmdQueue <- cmd
2020-01-30 05:41:47 +01:00
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON" )
2020-01-17 05:24:07 +01:00
err = rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
logOnError ( err , "jobGWithdraw : rescheduleJob" )
2020-01-17 05:28:00 +01:00
setJobCallback ( j . ID64 , int64 ( bot . Me . ID ) , cacheObjSubType [ ` msg_job_gwithdraw_ack ` ] )
2020-01-17 05:24:07 +01:00
return
} else {
p . Validated = true
2020-01-30 05:41:47 +01:00
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_withdraw_code ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_withdraw_req ` ] )
// TODO
}
} else {
err = rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
logOnError ( err , "jobGWithdraw : rescheduleJob" )
setJobCallback ( j . ID64 , int64 ( bot . Me . ID ) , cacheObjSubType [ ` msg_job_gwithdraw_ack ` ] )
return
}
return
case cacheObjSubType [ ` msg_withdraw_code ` ] :
log . Printf ( "jobGWithdraw[%d] : Handling withdraw code.\n" , j . ID64 )
if false /* check if it's ours */ {
for _ , d := range p . CleanupMsg {
if d . TGSenderUserID64 == int64 ( bot . Me . ID ) {
2020-01-17 05:24:07 +01:00
delmsg := tb . StoredMessage {
2020-01-17 05:27:01 +01:00
MessageID : fmt . Sprintf ( "%d" , d . ID64 ) ,
2020-01-17 05:24:07 +01:00
ChatID : d . ChatID64 ,
}
err = bot . Delete ( delmsg )
logOnError ( err , "jobGWithdraw : Delete" )
2020-01-30 05:41:47 +01:00
} else {
2020-01-17 05:24:07 +01:00
}
2020-01-30 05:41:47 +01:00
2020-01-17 05:24:07 +01:00
}
2020-01-30 05:41:47 +01:00
p . CleanupMsg = [ ] ChatWarsMessage { }
2020-01-17 05:24:07 +01:00
} else {
err = rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
logOnError ( err , "jobGWithdraw : rescheduleJob" )
2020-01-17 05:27:01 +01:00
setJobCallback ( j . ID64 , int64 ( bot . Me . ID ) , cacheObjSubType [ ` msg_job_gwithdraw_ack ` ] )
2020-01-17 05:24:07 +01:00
}
2020-01-30 05:41:47 +01:00
default :
log . Printf ( "jobGWithdraw[%d] : No handling for this message.\n" , j . ID64 )
2020-01-13 09:59:00 +01:00
}
}
}
2020-01-27 05:05:38 +01:00
/ *
c , err := getLockedRoleClient ( ` commander ` )
logOnError ( err , "jobGWithdraw: getLockedRoleClient(commander)" )
if err == nil {
c . Mux . Unlock ( )
2020-01-13 09:03:01 +01:00
}
2020-01-27 05:05:38 +01:00
* /
2020-01-13 09:03:01 +01:00
2020-01-30 05:41:47 +01:00
log . Printf ( "jobGWithdraw[%d] : Preparing withdrawal guild link.\n" , j . ID64 )
2020-01-27 05:05:38 +01:00
var stock string
for _ , i := range p . Items {
if i . Available > i . Required {
stock = fmt . Sprintf ( "%s\n%d x %s" , stock , i . Required , i . Name )
} else if i . Available > 0 {
stock = fmt . Sprintf ( "%s\n%d x %s" , stock , i . Available , i . Name )
2020-01-13 09:03:01 +01:00
}
2020-01-27 05:05:38 +01:00
}
2020-01-13 09:03:01 +01:00
2020-01-27 05:05:38 +01:00
if len ( stock ) > 0 {
2020-01-30 05:43:38 +01:00
err := setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON" )
2020-01-30 05:41:47 +01:00
setJobCallback ( j . ID64 , int64 ( bot . Me . ID ) , cacheObjSubType [ ` msg_job_gwithdraw_ack ` ] )
2020-01-13 09:03:01 +01:00
2020-01-27 05:05:38 +01:00
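// Validation reference: job ID and requesting user ID are packed as two
// little-endian uint64 values and encrypted as a single AES block, keyed with
// the first 16 bytes of SHA-256(bot token), then hex-encoded into the
// /withdraw_<ref> command so the later ack can be matched back to this job.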
keyHash := sha256 . Sum256 ( [ ] byte ( cfg . Telegram . Token ) )
sha128 := keyHash [ : aes . BlockSize ]
c , err := aes . NewCipher ( sha128 )
logOnError ( err , "jobGWithdraw : aes.NewCipher" )
2020-01-13 09:03:01 +01:00
2020-01-27 05:05:38 +01:00
in := make ( [ ] byte , 0 )
buf := make ( [ ] byte , 8 )
2020-01-30 05:41:47 +01:00
binary . LittleEndian . PutUint64 ( buf , uint64 ( j . ID64 ) )
2020-01-27 05:05:38 +01:00
in = append ( in , buf ... )
binary . LittleEndian . PutUint64 ( buf , uint64 ( p . UserID64 ) )
in = append ( in , buf ... )
out := make ( [ ] byte , len ( in ) )
2020-01-13 09:03:01 +01:00
2020-01-27 05:05:38 +01:00
ref := hex . EncodeToString ( in )
log . Printf ( "jobGWithdraw[%d] : in string : %s.\n" , j . ID64 , ref )
c . Encrypt ( out , in )
ref = hex . EncodeToString ( out )
log . Printf ( "jobGWithdraw[%d] : out string : %s.\n" , j . ID64 , ref )
2020-01-16 15:39:48 +01:00
2020-01-27 05:05:38 +01:00
2020-01-16 16:34:21 +01:00
2020-01-27 05:05:38 +01:00
m , err := getObjMsg ( j . Trigger )
logOnError ( err , "jobGWithdraw : getObjMsg" )
2020-01-17 05:31:30 +01:00
2020-01-27 05:05:38 +01:00
p . CleanupMsg = append ( p . CleanupMsg , * m )
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobGWithdraw : setJobPayloadJSON" )
2020-01-17 05:31:30 +01:00
2020-01-27 05:05:38 +01:00
b , err = json . Marshal ( p )
log . Printf ( "jobGWithdraw[%d] : %s\n" , j . ID64 , string ( b ) )
2020-01-16 16:34:21 +01:00
2020-01-27 05:05:38 +01:00
u , err := bot . ChatByID ( fmt . Sprintf ( "%d" , p . UserID64 ) )
logOnError ( err , "jobGWithdraw : ChatByID" )
2020-01-17 05:24:07 +01:00
2020-01-27 05:05:38 +01:00
msg := fmt . Sprintf ( "Click to validate @%s's withdrawal of <code>%s</code>\n/withdraw_%s" , u . Username , stock , ref )
2020-01-16 16:34:21 +01:00
2020-01-27 05:05:38 +01:00
cmd := TGCommand {
Type : commandSendMsg ,
Text : msg ,
ToChatID64 : cfg . Bot . Mainchat ,
ParseMode : cmdParseModeHTML ,
}
TGCmdQueue <- cmd
2020-01-30 05:41:47 +01:00
return
2020-01-27 05:05:38 +01:00
} else {
cmd := TGCommand {
Type : commandReplyMsg ,
Text : "No stock available whatsoever" ,
FromMsgID64 : p . MsgID64 ,
FromChatID64 : p . ChatID64 ,
ParseMode : cmdParseModeHTML ,
2020-01-16 14:40:29 +01:00
}
2020-01-27 05:05:38 +01:00
TGCmdQueue <- cmd
2019-10-04 12:38:03 +02:00
}
err = setJobDone ( j . ID64 )
logOnError ( err , "jobGWithdraw : setJobDone" )
return
}
2019-12-12 11:24:12 +01:00
2019-12-13 09:34:32 +01:00
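// jobSetDef reads the /me ack that triggered the job and, if the player is
// resting, sends the 🛡Defend command on their behalf.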
func jobSetDef ( j Job ) {
var p JobPayloadSetDef
err := setJobStart ( j . ID64 )
logOnError ( err , "jobSetDef : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobSetDef : Unmarshal payload" )
2019-12-13 10:37:20 +01:00
msg , err := getObjMsg ( j . Trigger )
2019-12-13 09:34:32 +01:00
logOnError ( err , "jobSetDef : getObjMsg msg" )
2019-12-13 10:37:20 +01:00
rule , err := getMsgParsingRule ( msg )
logOnError ( err , "jobSetDef : getMsgParsingRule" )
cwm , err := parseSubTypeMessageMeAck ( msg , rule . re )
logOnError ( err , "jobSetDef : parseSubTypeMessageMeAck" )
if err == nil && cwm . State == ` 🛌Rest ` {
clientSendCWMsg ( j . UserID64 , ` 🛡Defend ` )
}
2019-12-13 09:34:32 +01:00
err = setJobDone ( j . ID64 )
logOnError ( err , "jobSetDef : setJobDone" )
return
}
2019-12-19 04:31:28 +01:00
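// jobGetHammerTime parses a time/weather ack and, when the current period has
// perfect weather, announces in the main chat how long it is expected to last.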
func jobGetHammerTime ( j Job ) {
var p JobPayloadSetDef
err := setJobStart ( j . ID64 )
logOnError ( err , "jobGetHammerTime : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobGetHammerTime : Unmarshal payload" )
msg , err := getObjMsg ( j . Trigger )
logOnError ( err , "jobGetHammerTime : getObjMsg msg" )
rule , err := getMsgParsingRule ( msg )
logOnError ( err , "jobGetHammerTime : getMsgParsingRule" )
cwm , err := parseSubTypeMessageTimeAck ( msg , rule . re )
logOnError ( err , "jobGetHammerTime : parseSubTypeMessageTimeAck" )
2019-12-19 04:39:02 +01:00
2019-12-20 10:34:03 +01:00
out := ` `
if hammerTimeNow ( cwm . TimeOfDay , cwm . Weather ) {
if hammerTimeNext ( cwm . TimeOfDay , cwm . WeatherNext ) ||
hammerTimeNext ( cwm . TimeOfDay , cwm . Weather ) {
out = ` Perfect weather for the next 2 hours, possibly 4. `
} else {
out = ` Perfect weather only for the next 2 hours. `
2019-12-19 04:33:11 +01:00
}
2019-12-29 08:50:56 +01:00
c := TGCommand {
Type : commandSendMsg ,
Text : out ,
ToChatID64 : cfg . Bot . Mainchat ,
ParseMode : cmdParseModeHTML ,
2019-12-20 10:34:03 +01:00
}
2019-12-29 08:50:56 +01:00
TGCmdQueue <- c
2019-12-20 10:34:03 +01:00
}
2019-12-29 08:50:56 +01:00
/ *
} else {
if hammerTimeNext ( cwm . TimeOfDay , cwm . WeatherNext ) ||
hammerTimeNext ( cwm . TimeOfDay , cwm . Weather ) {
out = ` Perfect weather maybe in 2 hours. `
} else {
out = ` No perfect weather in sight for the next 4 hours. `
}
}
* /
2019-12-19 04:39:02 +01:00
2019-12-19 04:31:28 +01:00
err = setJobDone ( j . ID64 )
2019-12-19 04:33:11 +01:00
logOnError ( err , "jobGetHammerTime : setJobDone" )
2019-12-19 04:31:28 +01:00
return
}
2020-01-26 14:40:20 +01:00
2020-01-27 05:05:38 +01:00
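// jobCraftItem works out what is needed to craft p.Quantity of an item:
// Status 0 requests a vault snapshot (resources, recipes, parts, alchemy
// items), then the job expands the recipe against the snapshot and replies
// with the total mana plus the required, missing and to-craft quantities.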
func jobCraftItem ( j Job ) {
var (
p JobPayloadCraftItem
p2 JobPayloadGetVault
b [ ] byte
item * ChatWarsItem
totalMana int64
requiredItems map [ string ] int64
missingItems map [ string ] int64
availableItems map [ string ] int64
craftItems map [ string ] int64
)
err := setJobStart ( j . ID64 )
logOnError ( err , "jobCraftItem : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobCraftItem : Unmarshal payload" )
if p . Status == 0 {
p2 . JobCallbackID64 = j . ID64
p2 . ItemTypeList = make ( [ ] int64 , 0 )
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_res ` ] )
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_recipe ` ] )
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_part ` ] )
2020-01-27 05:33:55 +01:00
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_alch ` ] )
2020-01-27 05:05:38 +01:00
b , err = json . Marshal ( p2 )
logOnError ( err , "jobCraftItem : Marshal(p2)" )
jobID64 , err := createJob ( cacheObjSubType [ ` job_get_vault ` ] , objJobPriority , j . UserID64 , 0 , time . Now ( ) . UTC ( ) , b )
logOnError ( err , "jobCraftItem : createJob" )
p . Status = 1
p . VaultJobID64 = jobID64
b , err = json . Marshal ( p )
logOnError ( err , "jobCraftItem : Marshal(p)" )
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobCraftItem : setJobPayloadJSON(p)" )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
return
}
b = getJobPayload ( p . VaultJobID64 )
err = json . Unmarshal ( b , & p2 )
logOnError ( err , "jobCraftItem : Unmarshal(p2)" )
2020-01-27 05:08:38 +01:00
item , err = getObjItem ( p . ObjItemID64 )
logOnError ( err , "jobCraftItem : getObjItem" )
2020-01-27 05:05:38 +01:00
availableItems = make ( map [ string ] int64 )
requiredItems = make ( map [ string ] int64 )
missingItems = make ( map [ string ] int64 )
2020-01-27 05:14:58 +01:00
craftItems = make ( map [ string ] int64 )
2020-01-27 05:43:29 +01:00
totalMana = item . Craft . Mana * p . Quantity
2020-01-27 05:05:38 +01:00
for _ , v := range item . Craft . Items {
2020-01-27 05:43:29 +01:00
requiredItems [ v . Code ] = v . Quantity * p . Quantity
2020-01-27 05:05:38 +01:00
missingItems [ v . Code ] = 0
craftItems [ v . Code ] = 0
}
for _ , v := range p2 . Vault {
availableItems [ v . Code ] = v . Quantity
}
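// Expand the requirements until nothing is short: craftable intermediates are
// scheduled for crafting (their own ingredients are added to the requirements),
// anything else is recorded as missing.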
update := true
for update {
update = false
for code , req := range requiredItems {
ava , _ := availableItems [ code ]
craft , _ := craftItems [ code ]
missing , _ := missingItems [ code ]
if ( ava + craft + missing ) < req {
update = true
2020-01-27 05:08:38 +01:00
obj , err := getObjItem ( getObjItemID ( code , ` ` ) )
logOnError ( err , "jobCraftItem : getObjItem" )
2020-01-27 05:05:38 +01:00
if obj . Craft != nil {
craftItems [ code ] = req - ava
2020-01-27 05:15:42 +01:00
totalMana = totalMana + ( req - ava ) * obj . Craft . Mana
2020-01-27 05:05:38 +01:00
for _ , v := range obj . Craft . Items {
req2 , _ := requiredItems [ v . Code ]
requiredItems [ v . Code ] = req2 + v . Quantity * ( req - ava )
}
} else {
2020-01-27 06:10:31 +01:00
if obj . Craftable {
2020-01-27 07:06:48 +01:00
w := TGCommand {
Type : commandSendMsg ,
Text : fmt . Sprintf ( "Item missing recipe : %s\n" , code ) ,
ToUserID64 : cfg . Bot . Admin ,
}
TGCmdQueue <- w
2020-01-27 06:10:31 +01:00
}
2020-01-27 05:05:38 +01:00
missingItems [ code ] = req - ava
}
}
}
}
/* we can finish the job */
2020-01-27 07:01:06 +01:00
out := fmt . Sprintf ( "<code>Summary for %d %s\n" , p . Quantity , item . Names [ 0 ] )
2020-01-27 06:34:52 +01:00
out = fmt . Sprintf ( "%s Mana : %d\n" , out , totalMana )
out = fmt . Sprintf ( "%s Items :\n" , out )
2020-01-27 06:22:13 +01:00
for k , v := range requiredItems {
2020-01-27 06:23:34 +01:00
obj , _ := getObjItem ( getObjItemID ( k , ` ` ) )
2020-01-27 06:22:13 +01:00
ava , _ := availableItems [ k ]
2020-01-27 06:34:52 +01:00
out = fmt . Sprintf ( "%s [%s] %s : %d (%d)\n" , out , obj . Code , obj . Names [ 0 ] , v , ava )
2020-01-27 06:22:13 +01:00
}
2020-01-27 06:34:52 +01:00
out = fmt . Sprintf ( "%s Missing :\n" , out )
2020-01-27 06:22:13 +01:00
for k , v := range missingItems {
2020-01-27 06:34:52 +01:00
if v > 0 {
obj , _ := getObjItem ( getObjItemID ( k , ` ` ) )
out = fmt . Sprintf ( "%s [%s] %s : %d\n" , out , obj . Code , obj . Names [ 0 ] , v )
}
2020-01-27 06:22:13 +01:00
}
2020-01-27 06:34:52 +01:00
out = fmt . Sprintf ( "%s To craft :\n" , out )
2020-01-27 06:22:13 +01:00
for k , v := range craftItems {
2020-01-27 06:34:52 +01:00
if v > 0 {
obj , _ := getObjItem ( getObjItemID ( k , ` ` ) )
out = fmt . Sprintf ( "%s [%s] %s : %d\n" , out , obj . Code , obj . Names [ 0 ] , v )
}
2020-01-27 06:22:13 +01:00
}
2020-01-27 06:34:52 +01:00
out = fmt . Sprintf ( "%s</code>" , out )
2020-01-27 06:22:13 +01:00
2020-01-27 06:29:25 +01:00
c := TGCommand {
Type : commandReplyMsg ,
2020-01-29 15:56:19 +01:00
Text : out ,
FromMsgID64 : p . MsgID64 ,
FromChatID64 : p . ChatID64 ,
ParseMode : cmdParseModeHTML ,
}
TGCmdQueue <- c
err = setJobDone ( j . ID64 )
logOnError ( err , "jobCraftItem : setJobDone" )
return
}
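// jobCraftAll estimates which "other" items can be assembled from the recipes
// and parts currently in the vault: Status 0 requests a recipe/part snapshot,
// then the job reports complete sets, sets that are one part short, and sets
// that are missing their recipe.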
func jobCraftAll ( j Job ) {
var (
2020-02-01 08:00:33 +01:00
p JobPayloadCraftAll
p2 JobPayloadGetVault
b [ ] byte
itemParts map [ int64 ] string
itemRecipes map [ int64 ] string
ratioItems map [ string ] int64
totalParts map [ string ] int64
totalRecipes map [ string ] int64
completeItems map [ string ] float64
maxItems int64
2020-01-29 15:56:19 +01:00
)
err := setJobStart ( j . ID64 )
logOnError ( err , "jobCraftAll : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobCraftAll : Unmarshal payload" )
if p . Status == 0 {
p2 . JobCallbackID64 = j . ID64
p2 . ItemTypeList = make ( [ ] int64 , 0 )
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_recipe ` ] )
p2 . ItemTypeList = append ( p2 . ItemTypeList , cacheObjSubType [ ` item_part ` ] )
b , err = json . Marshal ( p2 )
logOnError ( err , "jobCraftAll : Marshal(p2)" )
jobID64 , err := createJob ( cacheObjSubType [ ` job_get_vault ` ] , objJobPriority , j . UserID64 , 0 , time . Now ( ) . UTC ( ) , b )
logOnError ( err , "jobCraftAll : createJob" )
p . Status = 1
p . VaultJobID64 = jobID64
b , err = json . Marshal ( p )
logOnError ( err , "jobCraftAll : Marshal(p)" )
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobCraftAll : setJobPayloadJSON(p)" )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
return
}
b = getJobPayload ( p . VaultJobID64 )
err = json . Unmarshal ( b , & p2 )
logOnError ( err , "jobCraftAll : Unmarshal(p2)" )
2020-02-01 08:00:33 +01:00
itemParts = make ( map [ int64 ] string )
itemRecipes = make ( map [ int64 ] string )
2020-02-01 03:44:34 +01:00
ratioItems = make ( map [ string ] int64 )
2020-02-01 08:00:33 +01:00
totalParts = make ( map [ string ] int64 )
totalRecipes = make ( map [ string ] int64 )
completeItems = make ( map [ string ] float64 )
2020-01-29 15:56:19 +01:00
muxObjItem . RLock ( )
2020-02-01 03:44:34 +01:00
for _ , o := range objItems {
if o . ItemTypeID == cacheObjSubType [ ` item_other ` ] {
if o . Craftable && o . Craft != nil {
for _ , i := range o . Craft . Items {
comp , _ := getObjItem ( i . ItemID64 )
if comp . ItemTypeID == cacheObjSubType [ ` item_part ` ] {
ratioItems [ o . Code ] = i . Quantity
2020-02-01 08:00:33 +01:00
itemParts [ comp . ObjID64 ] = o . Code
2020-02-01 03:44:34 +01:00
} else if comp . ItemTypeID == cacheObjSubType [ ` item_recipe ` ] {
2020-02-01 08:00:33 +01:00
itemRecipes [ comp . ObjID64 ] = o . Code
2020-02-01 03:44:34 +01:00
}
}
}
2020-01-29 15:56:19 +01:00
}
}
2020-02-01 03:44:34 +01:00
muxObjItem . RUnlock ( )
for _ , i := range p2 . Vault {
2020-02-01 08:21:43 +01:00
if item , ok := itemParts [ i . ItemID64 ] ; ok {
2020-02-01 08:00:33 +01:00
totalParts [ item ] = i . Quantity
2020-02-01 08:21:43 +01:00
} else if item , ok := itemRecipes [ i . ItemID64 ] ; ok {
2020-02-01 08:00:33 +01:00
totalRecipes [ item ] = i . Quantity
}
}
2020-02-01 03:44:34 +01:00
2020-02-01 08:00:33 +01:00
for k , i := range ratioItems {
recipes , _ := totalRecipes [ k ]
parts , _ := totalParts [ k ]
if ( recipes > 0 && parts > ( i - 1 ) ) || ( parts >= i ) {
2020-02-01 08:21:43 +01:00
completeItems [ k ] = float64 ( MinInt64 ( recipes * i , parts ) / i )
2020-02-01 08:00:33 +01:00
maxItems = MaxInt64 ( maxItems , int64 ( completeItems [ k ] ) )
}
2020-02-01 03:44:34 +01:00
}
2020-01-29 15:56:19 +01:00
/* we can finish the job */
2020-02-01 08:00:33 +01:00
out := fmt . Sprintf ( "<code>Vault crafting summary\n" )
for maxItems > 0 {
out = fmt . Sprintf ( "%s%d Items :\n" , out , maxItems )
for k , v := range completeItems {
if maxItems == int64 ( v ) {
o , _ := getObjItem ( getSilentObjItemID ( k , ` ` ) )
out = fmt . Sprintf ( "%s %s - %s\n" , out , k , o . Names [ 0 ] )
2020-01-30 05:44:35 +01:00
}
2020-01-29 15:56:19 +01:00
}
2020-02-01 08:00:33 +01:00
maxItems -= 1
}
out = fmt . Sprintf ( "%s1 Part missing :\n" , out )
for k , v := range completeItems {
if int64 ( v ) == 0 {
parts , _ := totalParts [ k ]
if parts == ratioItems [ k ] - 1 {
o , _ := getObjItem ( getSilentObjItemID ( k , ` ` ) )
out = fmt . Sprintf ( "%s %s - %s\n" , out , k , o . Names [ 0 ] )
2020-01-30 05:44:35 +01:00
}
}
2020-02-01 08:00:33 +01:00
}
2020-01-27 06:22:13 +01:00
2020-02-01 08:00:33 +01:00
out = fmt . Sprintf ( "%sRecipe missing :\n" , out )
for k , v := range completeItems {
if int64 ( v ) == 0 {
recipe , _ := totalRecipes [ k ]
if recipe == 0 {
o , _ := getObjItem ( getSilentObjItemID ( k , ` ` ) )
out = fmt . Sprintf ( "%s %s - %s\n" , out , k , o . Names [ 0 ] )
}
2020-01-30 05:44:35 +01:00
}
2020-02-01 08:00:33 +01:00
}
out = fmt . Sprintf ( "%s</code>" , out )
c := TGCommand {
Type : commandReplyMsg ,
Text : out ,
FromMsgID64 : p . MsgID64 ,
FromChatID64 : p . ChatID64 ,
ParseMode : cmdParseModeHTML ,
}
TGCmdQueue <- c
2020-01-27 05:05:38 +01:00
err = setJobDone ( j . ID64 )
2020-02-01 08:00:33 +01:00
logOnError ( err , "jobCraftAll : setJobDone" )
2020-01-27 05:05:38 +01:00
return
}
2020-02-03 03:54:34 +01:00
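// jobCheckVaultLimit requests a vault snapshot covering the item types named in
// cfg.Bot.VaultLimit; the actual comparison against the configured limits is
// not implemented yet, so the job currently just reloads the snapshot and
// finishes.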
func jobCheckVaultLimit ( j Job ) {
var (
p JobPayloadCheckVaultLimit
p2 JobPayloadGetVault
b [ ] byte
)
err := setJobStart ( j . ID64 )
logOnError ( err , "jobCheckVaultLimit : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobCheckVaultLimit : Unmarshal payload" )
if p . Status == 0 {
p2 . JobCallbackID64 = j . ID64
p2 . ItemTypeList = make ( [ ] int64 , 0 )
2020-02-05 09:34:44 +01:00
for _ , c := range cfg . Bot . VaultLimit {
o , err := getObjItem ( getSilentObjItemID ( c . Item , ` ` ) )
logOnError ( err , "jobCheckVaultLimit : getObjItem" )
p2 . ItemTypeList = append ( p2 . ItemTypeList , o . ItemTypeID )
}
2020-02-03 03:54:34 +01:00
b , err = json . Marshal ( p2 )
logOnError ( err , "jobCheckVaultLimit : Marshal(p2)" )
jobID64 , err := createJob ( cacheObjSubType [ ` job_get_vault ` ] , objJobPriority , j . UserID64 , 0 , time . Now ( ) . UTC ( ) , b )
logOnError ( err , "jobCheckVaultLimit : createJob" )
p . Status = 1
p . VaultJobID64 = jobID64
b , err = json . Marshal ( p )
logOnError ( err , "jobCheckVaultLimit : Marshal(p)" )
err = setJobPayloadJSON ( j . ID64 , p )
logOnError ( err , "jobCheckVaultLimit : setJobPayloadJSON(p)" )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
return
}
b = getJobPayload ( p . VaultJobID64 )
err = json . Unmarshal ( b , & p2 )
logOnError ( err , "jobCheckVaultLimit : Unmarshal(p2)" )
err = setJobDone ( j . ID64 )
logOnError ( err , "jobCheckVaultLimit : setJobDone" )
return
}
2020-02-06 16:23:54 +01:00
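// jobShops and jobShopsSlave are placeholders for now; they only mark the job
// as done.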
func jobShops ( j Job ) {
2020-02-06 16:25:48 +01:00
err := setJobDone ( j . ID64 )
2020-02-06 16:23:54 +01:00
logOnError ( err , "jobShops : setJobDone" )
return
}
func jobShopsSlave ( j Job ) {
2020-02-06 16:25:48 +01:00
err := setJobDone ( j . ID64 )
2020-02-06 16:23:54 +01:00
logOnError ( err , "jobShopsSlave : setJobDone" )
return
}
2020-01-26 14:40:20 +01:00
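// jobGetVault collects a guild vault snapshot one stock category at a time: it
// sends the matching /g_stock_* command, registers callbacks for the request
// echo and the stock ack, and reschedules itself until every requested category
// has been answered, then deletes the intermediate messages and wakes the
// calling job (p.JobCallbackID64).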
func jobGetVault ( j Job ) {
var (
p JobPayloadGetVault
reqTab map [ int64 ] int64
doneTab map [ int64 ] int64
)
2020-01-26 14:58:06 +01:00
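// p.Status is used as a bitmask: bit k means stock category items[k] still has
// to be requested, bit 10+k means that category is done. Categories that were
// never requested are flagged done right away.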
items := [ ] string { ` item_res ` , ` item_alch ` , ` item_misc ` , ` item_recipe ` , ` item_part ` , ` item_other ` }
2020-01-26 14:40:20 +01:00
reqTab = make ( map [ int64 ] int64 )
2020-01-26 14:55:43 +01:00
for k , v := range items {
2020-01-26 14:58:40 +01:00
reqTab [ cacheObjSubType [ v ] ] = 1 << uint ( k )
2020-01-26 14:55:43 +01:00
}
2020-01-26 14:40:20 +01:00
doneTab = make ( map [ int64 ] int64 )
2020-01-26 14:55:43 +01:00
for k , v := range items {
2020-01-26 14:58:40 +01:00
doneTab [ cacheObjSubType [ v ] ] = 1 << ( 10 + uint ( k ) )
2020-01-26 14:55:43 +01:00
}
2020-01-26 14:40:20 +01:00
err := setJobStart ( j . ID64 )
logOnError ( err , "jobGetVault : setJobStart" )
err = json . Unmarshal ( j . Payload , & p )
logOnError ( err , "jobGetVault : Unmarshal payload" )
if p . Status == 0 {
2020-01-26 14:53:56 +01:00
for _ , typeID64 := range p . ItemTypeList {
2020-01-26 14:40:20 +01:00
p . Status = p . Status | reqTab [ typeID64 ]
}
2020-01-26 14:55:43 +01:00
for _ , v := range items {
2020-01-26 14:53:56 +01:00
if ( p . Status & reqTab [ cacheObjSubType [ v ] ] ) == 0 {
p . Status = p . Status | doneTab [ cacheObjSubType [ v ] ]
}
2020-01-26 14:40:20 +01:00
}
}
if j . Trigger != 0 {
id , err := getObjSubTypeId ( j . Trigger )
logOnError ( err , "jobGetVault : getObjSubType(" + strconv . FormatInt ( j . Trigger , 10 ) + ")" )
if err == nil {
m , err := getObjMsg ( j . Trigger )
logOnError ( err , "jobGetVault : getObjMsg" )
rule , err := getMsgParsingRule ( m )
logOnError ( err , "jobGetVault : getMsgParsingRule" )
switch id {
case cacheObjSubType [ ` msg_gstock_any_ack ` ] :
cwm , err := parseSubTypeMessageGStockAnyAck ( m , rule . re )
logOnError ( err , "jobGetVault : parseSubTypeMessageGStockAnyAck" )
for _ , disp := range cwm . Stock {
2020-01-26 14:50:41 +01:00
item := ChatWarsItems {
2020-01-26 14:40:20 +01:00
Code : disp . Code ,
ItemID64 : disp . ItemID64 ,
Name : disp . Name ,
Quantity : disp . Quantity ,
}
p . Vault = append ( p . Vault , item )
}
p . CleanupMsg = append ( p . CleanupMsg , * m )
case cacheObjSubType [ ` msg_gstock_oth_req ` ] :
2020-01-26 15:24:09 +01:00
fallthrough
2020-01-26 14:40:20 +01:00
case cacheObjSubType [ ` msg_gstock_res_req ` ] :
2020-01-26 15:24:09 +01:00
fallthrough
2020-01-26 14:40:20 +01:00
case cacheObjSubType [ ` msg_gstock_alch_req ` ] :
2020-01-26 15:24:09 +01:00
fallthrough
2020-01-26 14:40:20 +01:00
case cacheObjSubType [ ` msg_gstock_misc_req ` ] :
2020-01-26 15:24:09 +01:00
fallthrough
2020-01-26 14:40:20 +01:00
case cacheObjSubType [ ` msg_gstock_rec_req ` ] :
2020-01-26 15:24:09 +01:00
fallthrough
2020-01-26 14:40:20 +01:00
case cacheObjSubType [ ` msg_gstock_part_req ` ] :
p . CleanupMsg = append ( p . CleanupMsg , * m )
setJobPayloadJSON ( j . ID64 , p )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
return
default :
}
}
}
if ( p . Status & reqTab [ cacheObjSubType [ ` item_res ` ] ] ) == reqTab [ cacheObjSubType [ ` item_res ` ] ] {
p . Status = p . Status &^ reqTab [ cacheObjSubType [ ` item_res ` ] ]
p . Status = p . Status | doneTab [ cacheObjSubType [ ` item_res ` ] ]
setJobPayloadJSON ( j . ID64 , p )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_res_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_any_ack ` ] )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
2020-01-27 06:10:31 +01:00
clientSendCWMsgDelay ( j . UserID64 , ` /g_stock_res ` , 4 )
2020-01-26 14:40:20 +01:00
return
} else if ( p . Status & reqTab [ cacheObjSubType [ ` item_alch ` ] ] ) == reqTab [ cacheObjSubType [ ` item_alch ` ] ] {
p . Status = p . Status &^ reqTab [ cacheObjSubType [ ` item_alch ` ] ]
p . Status = p . Status | doneTab [ cacheObjSubType [ ` item_alch ` ] ]
setJobPayloadJSON ( j . ID64 , p )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_alch_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_any_ack ` ] )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
2020-01-27 06:10:31 +01:00
clientSendCWMsgDelay ( j . UserID64 , ` /g_stock_alch ` , 4 )
2020-01-26 14:40:20 +01:00
return
} else if ( p . Status & reqTab [ cacheObjSubType [ ` item_misc ` ] ] ) == reqTab [ cacheObjSubType [ ` item_misc ` ] ] {
p . Status = p . Status &^ reqTab [ cacheObjSubType [ ` item_misc ` ] ]
p . Status = p . Status | doneTab [ cacheObjSubType [ ` item_misc ` ] ]
setJobPayloadJSON ( j . ID64 , p )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_misc_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_any_ack ` ] )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
2020-01-27 06:10:31 +01:00
clientSendCWMsgDelay ( j . UserID64 , ` /g_stock_misc ` , 4 )
2020-01-26 14:40:20 +01:00
return
} else if ( p . Status & reqTab [ cacheObjSubType [ ` item_recipe ` ] ] ) == reqTab [ cacheObjSubType [ ` item_recipe ` ] ] {
p . Status = p . Status &^ reqTab [ cacheObjSubType [ ` item_recipe ` ] ]
p . Status = p . Status | doneTab [ cacheObjSubType [ ` item_recipe ` ] ]
setJobPayloadJSON ( j . ID64 , p )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_rec_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_any_ack ` ] )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
2020-01-27 06:10:31 +01:00
clientSendCWMsgDelay ( j . UserID64 , ` /g_stock_rec ` , 4 )
2020-01-26 14:40:20 +01:00
return
} else if ( p . Status & reqTab [ cacheObjSubType [ ` item_part ` ] ] ) == reqTab [ cacheObjSubType [ ` item_part ` ] ] {
p . Status = p . Status &^ reqTab [ cacheObjSubType [ ` item_part ` ] ]
p . Status = p . Status | doneTab [ cacheObjSubType [ ` item_part ` ] ]
setJobPayloadJSON ( j . ID64 , p )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_part_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_any_ack ` ] )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
2020-01-27 06:10:31 +01:00
clientSendCWMsgDelay ( j . UserID64 , ` /g_stock_parts ` , 4 )
2020-01-26 14:40:20 +01:00
return
} else if ( p . Status & reqTab [ cacheObjSubType [ ` item_other ` ] ] ) == reqTab [ cacheObjSubType [ ` item_other ` ] ] {
p . Status = p . Status &^ reqTab [ cacheObjSubType [ ` item_other ` ] ]
p . Status = p . Status | doneTab [ cacheObjSubType [ ` item_other ` ] ]
setJobPayloadJSON ( j . ID64 , p )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_oth_req ` ] )
setJobCallback ( j . ID64 , j . UserID64 , cacheObjSubType [ ` msg_gstock_any_ack ` ] )
rescheduleJob ( j . ID64 , 0 , time . Unix ( maxUnixTimestamp , 0 ) . UTC ( ) )
2020-01-27 06:10:31 +01:00
clientSendCWMsgDelay ( j . UserID64 , ` /g_stock_other ` , 4 )
2020-01-26 14:40:20 +01:00
return
2020-01-26 15:24:09 +01:00
} else {
setJobPayloadJSON ( j . ID64 , p )
2020-01-26 14:40:20 +01:00
}
/* clean up the messages */
for _ , d := range p . CleanupMsg {
2020-01-26 15:04:24 +01:00
clientDelTGMsg ( j . UserID64 , d . ID64 , d . ChatID64 )
2020-01-26 14:40:20 +01:00
}
/* wake up the callback */
err = rescheduleJob ( p . JobCallbackID64 , 0 , time . Now ( ) . UTC ( ) )
logOnError ( err , "jobGetVault : rescheduleJob" )
/* no more req to send, all ack came through, we can finish the job */
err = setJobDone ( j . ID64 )
logOnError ( err , "jobGetVault : setJobDone" )
return
}