// chirpnest/job.go

package main

import (
	"archive/zip"
	"bytes"
	"compress/zlib"
	"crypto/aes"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	tb "gopkg.in/tucnak/telebot.v2"
)
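
// createJob persists a new job: the payload is zlib-compressed before being
// stored (and rejected if the compressed form exceeds 20000 bytes), an obj row
// records the job type, the obj_job row holds schedule/timeout/payload, and
// the in-memory cacheObjJob map is primed with the uncompressed payload.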
func createJob(jobTypeID64 int64, priority int32, userID64 int64, trigger int64, schedule time.Time, payload []byte) (int64, error) {
	var zb bytes.Buffer
	zw := zlib.NewWriter(&zb)
	zw.Write(payload)
	zw.Close()
	zpayload := zb.Bytes()
	if len(zpayload) > 20000 {
		return 0, errors.New("payload too long")
	}

	stmt, err := db.Prepare(`INSERT INTO obj (obj_type_id, obj_sub_type_id)
		VALUES (? , ?);`)
	logOnError(err, "createJob : prepare insert obj")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	res, err := stmt.Exec(cacheObjType[`job`], jobTypeID64)
	s := fmt.Sprintf("createJob, insert obj(%d, %d)", cacheObjType[`job`], jobTypeID64)
	logOnError(err, s)
	if err != nil {
		return 0, err
	}

	objId, err := res.LastInsertId()
	logOnError(err, "createJob : get last insert Id")
	if err != nil {
		return 0, err
	}

	stmt, err = db.Prepare(`INSERT INTO obj_job (obj_id, priority, user_id, trigger_id, seq_nr, schedule, is_done, in_work, inserted, timeout, pulled, started, ended, payload)
		VALUES (?, ?, ?, ?, NULL, ?, 0, 0, ?, ?, NULL, NULL, NULL, ?);`)
	logOnError(err, "createJob : prepare insert obj_job")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	_, err = stmt.Exec(objId, priority, userID64, trigger, schedule.UTC(), time.Now().UTC(), time.Unix(maxUnixTimestamp, 0).UTC(), zpayload)
	logOnError(err, "createJob : insert obj_job")
	if err != nil {
		return 0, err
	}

	j := new(Job)
	j.ID64 = objId
	j.JobTypeID64 = jobTypeID64
	j.Trigger = trigger
	j.Timeout = time.Unix(maxUnixTimestamp, 0).UTC()
	j.UserID64 = userID64
	j.Payload = payload
	muxObjJob.Lock()
	cacheObjJob[objId] = *j
	muxObjJob.Unlock()

	return objId, nil
}
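
// createJobCallback creates a job that is parked at the maximum Unix timestamp
// and registers it in the callbacks map via setJobCallback, so it only runs
// once a message of the given msgTypeID64 arrives for that user, with
// setJobTimeout bounding how long it may wait.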
func createJobCallback(jobTypeID64 int64, userID64 int64, msgTypeID64 int64, payload []byte, timeout time.Duration) error {
	//t, err := time.Parse(time.RFC3339, "9999-12-31T00:00:00+00:00")
	jobID64, err := createJob(jobTypeID64, objJobPriority, userID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), payload)
	if err != nil {
		return err
	}
	setJobCallback(jobID64, userID64, msgTypeID64)
	err = setJobTimeout(jobID64, timeout)
	logOnError(err, "createJobCallback : setJobTimeout")
	return err
}

func setJobCallback(jobID64 int64, userID64 int64, msgTypeID64 int64) {
	muxCallbacks.Lock()
	if _, ok := callbacks[userID64]; !ok {
		callbacks[userID64] = make(map[int64][]int64)
	}
	s := callbacks[userID64][msgTypeID64]
	s = append(s, jobID64)
	callbacks[userID64][msgTypeID64] = s
	muxCallbacks.Unlock()
}

func setJobTimeout(jobID64 int64, d time.Duration) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.timeout = ? WHERE j.obj_id = ?;`)
	logOnError(err, "setJobTimeout : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	t := time.Now().UTC().Add(d)
	_, err = stmt.Exec(t, jobID64)
	logOnError(err, fmt.Sprintf("setJobTimeout, update obj_job(%d)", jobID64))
	if err != nil {
		return err
	}

	muxObjJob.Lock()
	j := cacheObjJob[jobID64]
	j.Timeout = t
	cacheObjJob[jobID64] = j
	muxObjJob.Unlock()

	return nil
}

func setJobDone(jobID64 int64) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.is_done = 1, j.in_work = 0, j.ended = ? WHERE j.obj_id = ?;`)
	logOnError(err, "setJobDone : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(time.Now().UTC(), jobID64)
	s := fmt.Sprintf("setJobDone, update obj_job(%d)", jobID64)
	logOnError(err, s)
	if err != nil {
		return err
	}
	return nil
}

func setJobStart(jobId int64) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.started = coalesce(j.started, ?) WHERE j.obj_id = ?;`)
	logOnError(err, "setJobStart : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(time.Now().UTC(), jobId)
	s := fmt.Sprintf("setJobStart, update obj_job(%d)", jobId)
	logOnError(err, s)
	if err != nil {
		return err
	}
	return nil
}

func rescheduleJob(jobID64 int64, trigger int64, schedule time.Time) error {
	stmt, err := db.Prepare(`UPDATE obj_job j SET j.in_work = 0, j.schedule = ?, j.trigger_id = ? WHERE j.obj_id = ?;`)
	logOnError(err, "rescheduleJob : prepare update obj_job")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(schedule.UTC(), trigger, jobID64)
	s := fmt.Sprintf("rescheduleJob, update obj_job(%d)", jobID64)
	logOnError(err, s)
	if err != nil {
		return err
	}
	return nil
}
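
// loadCurrentJobs claims a batch of due jobs in two steps: an UPDATE stamps up
// to SQLJobSliceSize pending rows with a random seq_nr and flags them in_work,
// then a SELECT on that same seq_nr reads the claimed rows back and inflates
// their zlib-compressed payloads into Job structs.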
func loadCurrentJobs() ([]Job, error) {
	var (
		objId       int64
		jobTypeID64 int64
		userID64    int64
		trigger     int64
		timeout     time.Time
		zpayload    []byte
		jobs        []Job
	)
	t := time.Now().UTC()
	r := RndInt64()
	_, err := db.Exec("UPDATE obj_job j SET j.pulled = ?, j.in_work = 1, j.seq_nr = ? WHERE j.is_done = 0 AND j.in_work = 0 AND j.schedule <= ? ORDER BY j.priority ASC, j.obj_id ASC LIMIT ?;", t, r, t, SQLJobSliceSize)
	logOnError(err, "loadCurrentJobs : update initial rows")
	if err != nil {
		return jobs, err
	}

	stmt, err := db.Prepare("SELECT o.id, o.obj_sub_type_id, j.trigger_id, j.user_id, j.payload, j.timeout FROM obj_job j, obj o WHERE j.obj_id = o.id AND j.is_done = 0 AND j.in_work = 1 AND j.seq_nr = ? ORDER BY j.priority ASC, j.obj_id ASC;")
	logOnError(err, "loadCurrentJobs : prepare select statement")
	if err != nil {
		return jobs, err
	}
	rows, err := stmt.Query(r)
	// rows, err := stmt.Query(time.Now())
	logOnError(err, "loadCurrentJobs : query select statement")
	if err != nil {
		stmt.Close()
		return jobs, err
	}
	for rows.Next() {
		err = rows.Scan(&objId, &jobTypeID64, &trigger, &userID64, &zpayload, &timeout)
		logOnError(err, "loadCurrentJobs : scan query rows")
		zb := bytes.NewReader(zpayload)
		zr, err := zlib.NewReader(zb)
		if err != nil {
			logOnError(err, "loadCurrentJobs : zlib.NewReader")
			continue
		}
		b := new(bytes.Buffer)
		b.ReadFrom(zr)
		zr.Close()
		payload := b.Bytes()
		job := Job{
			ID64:        objId,
			JobTypeID64: jobTypeID64,
			Trigger:     trigger,
			UserID64:    userID64,
			Timeout:     timeout,
			Payload:     payload,
		}

		jobs = append(jobs, job)
	}
	err = rows.Err()
	logOnError(err, "loadCurrentJobs : scan end rows")
	rows.Close()
	if err != nil {
		stmt.Close()
		return jobs, err
	}

	err = stmt.Close()
	logOnError(err, "loadCurrentJobs : close select statement")
	if err != nil {
		return jobs, err
	}
	return jobs, nil
}
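
// jobRescan pushes every message returned by the payload query back onto
// SQLMsgIdentifyQueue for re-identification, posting progress roughly once a
// minute; for large batches the final notification is delegated to a
// job_set_done job triggered by this job's ID.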
func jobRescan(j Job) {
	var p JobPayloadRescanMsg

	err := setJobStart(j.ID64)
	logOnError(err, "jobRescan : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobRescan : Unmarshal payload")
	start := time.Now()
	milestone := time.Now()

	ids := getSQLListID64(p.Query)
	if len(ids) > 1 {
		txt := fmt.Sprintf("Rescanning %d messages.", len(ids))
		m := TGCommand{
			Type:         commandReplyMsg,
			Text:         txt,
			FromMsgID64:  p.MsgID64,
			FromChatID64: p.ChatID64,
		}
		TGCmdQueue <- m
		i := 0
		for _, id := range ids {
			SQLMsgIdentifyQueue <- id
			i = i + 1
			if time.Now().After(milestone.Add(1 * time.Minute)) {
				//txt := fmt.Sprintf("Rescanned %d/%d messages.", i, len(ids))
				m = TGCommand{
					Type:         commandReplyMsg,
					Text:         fmt.Sprintf("Rescanned %d/%d messages.", i, len(ids)),
					FromMsgID64:  p.MsgID64,
					FromChatID64: p.ChatID64,
				}
				TGCmdQueue <- m
				milestone = time.Now()
			}
		}

		r := JobPayloadSetDone{
			JobID64:  j.ID64,
			MsgID64:  p.MsgID64,
			ChatID64: p.ChatID64,
			Text:     fmt.Sprintf("%d messages processed in %s.", len(ids), time.Since(start)),
		}
		b, _ := json.Marshal(r)
		_, err := createJob(cacheObjSubType[`job_set_done`], objJobPriorityRescanAllMsg, j.UserID64, j.ID64, time.Now().UTC(), b)
		logOnError(err, "jobRescan : createJob(cacheObjSubType[`job_set_done`])")
	} else if len(ids) == 1 {
		SQLMsgIdentifyQueue <- ids[0]
		err = setJobDone(j.ID64)
		logOnError(err, "jobRescan : setJobDone(1)")
		if p.MsgID64 != 0 || p.ChatID64 != 0 {
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "One message processed.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
		}
	} else {
		err = setJobDone(j.ID64)
		logOnError(err, "jobRescan : setJobDone(0)")
		if p.MsgID64 != 0 || p.ChatID64 != 0 {
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "No message processed.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
		}
	}
	return
}

func jobSetDone(j Job) {
	var r JobPayloadSetDone

	err := setJobStart(j.ID64)
	logOnError(err, "jobSetDone : setJobStart")

	err = json.Unmarshal(j.Payload, &r)
	logOnError(err, "jobSetDone : Unmarshal payload")

	err = setJobDone(r.JobID64)
	logOnError(err, "jobSetDone : setJobDone(child)")

	err = setJobDone(j.ID64)
	logOnError(err, "jobSetDone : setJobDone")

	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         r.Text,
		FromMsgID64:  r.MsgID64,
		FromChatID64: r.ChatID64,
	}
	TGCmdQueue <- m

	return
}
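
// jobPillage handles pillage interception: it first looks for an outcome
// message (go/win/loss/timeout) within 3m30 of the pillage alert and reports
// it; failing that it either acknowledges a /go already sent in the last 30
// seconds or sends one itself, rescheduling the job in 30-second steps until
// an outcome shows up or the window expires.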
func jobPillage(j Job) {
	var r JobPayloadPillage
	err := setJobStart(j.ID64)
	logOnError(err, "jobPillage : setJobStart")
	err = json.Unmarshal(j.Payload, &r)
	logOnError(err, "jobPillage : Unmarshal payload")
	// check whether we have an acknowledgment of /go or a timeout within 3m30 of the PillageInc message behind the job
	ids := getSQLListID64(`SELECT ox.id
		FROM obj ox
			,obj_msg omx
			,obj op
			,obj_msg omp
			,obj_job oj
		WHERE oj.obj_id = ` + strconv.FormatInt(j.ID64, 10) + `
		AND omx.user_id = oj.user_id
		AND omx.sender_user_id = ` + strconv.Itoa(userID64ChtWrsBot) + `
		AND omx.obj_id = ox.id
		AND ox.obj_sub_type_id in (` + strconv.FormatInt(cacheObjSubType[`msg_pillage_go`], 10) +
		`, ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_timeout`], 10) +
		`, ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_loss`], 10) +
		`, ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_win`], 10) + `)
		AND op.id = ` + strconv.FormatInt(r.ObjID64, 10) + `
		AND omp.obj_id = op.id
		AND omx.date between omp.date AND ADDTIME(omp.date, '0 0:3:30.000000')
		ORDER BY CASE ox.obj_sub_type_id WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_win`], 10) + ` THEN 0
			WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_loss`], 10) + ` THEN 1
			WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_timeout`], 10) + ` THEN 2
			WHEN ` + strconv.FormatInt(cacheObjSubType[`msg_pillage_go`], 10) + ` THEN 3
			ELSE 4 END ASC
		LIMIT 1;`)
	if len(ids) > 1 { // issue there ?
		s := TGCommand{
			Type:       commandSendMsg,
			Text:       fmt.Sprintf("More than one outcome for pillage #%d", r.ObjID64),
			ToUserID64: j.UserID64,
		}
		TGCmdQueue <- s
	} else if len(ids) == 1 { // we've got a match, job is done whether we prevented the pillage or not
		m, err := getObjMsg(ids[0])
		logOnError(err, "jobPillage : getMsg(cacheObjSubType[`msg_pillage_go`], cacheObjSubType[`msg_pillage_timeout`], 10)")
		if err == nil {
			if m.Date.Add(60 * time.Second).After(time.Now().UTC()) {
				msgTypeID64, err := getObjSubTypeId(ids[0])
				logOnError(err, "jobPillage : getObjSubTypeId")
				if err == nil {
					if msgTypeID64 == cacheObjSubType[`msg_pillage_go`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We avoided a pillage (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else if msgTypeID64 == cacheObjSubType[`msg_pillage_win`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We avoided a pillage (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else if msgTypeID64 == cacheObjSubType[`msg_pillage_loss`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We got pillaged (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else if msgTypeID64 == cacheObjSubType[`msg_pillage_timeout`] {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We got pillaged (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					} else {
						s := TGCommand{
							Type:       commandSendMsg,
							Text:       fmt.Sprintf("We don't know what happened (%s)", m.Date.Format(time.RFC3339)),
							ToUserID64: j.UserID64,
						}
						TGCmdQueue <- s
					}
				}
			}
		}
		err = setJobDone(j.ID64)
		logOnError(err, "jobPillage : setJobDone")
		return
	}
	// is the job outdated now ?
	if time.Now().UTC().After(r.Date.Add(time.Minute*3 + time.Second*30)) {
		// log.Printf("jobPillage :\n\tPillageTime : %s\n\tNowTime : %s\n", r.Date.Format(time.RFC3339), time.Now().UTC().Format(time.RFC3339))
		s := TGCommand{
			Type:       commandSendMsg,
			Text:       "Pillage interception expired",
			ToUserID64: j.UserID64,
		}
		TGCmdQueue <- s
		return
	}
	s := TGCommand{
		Type:       commandSendMsg,
		Text:       "No outcome for the pillage yet",
		ToUserID64: j.UserID64,
	}
	TGCmdQueue <- s
	// no outcome yet, have we sent a "/go" in the last 30 sec ?
	ids = getSQLListID64(`select ox.id
		from obj ox
			,obj_msg omx
			,obj_job oj
		where oj.obj_id = ` + strconv.FormatInt(j.ID64, 10) + `
		and omx.user_id = oj.user_id
		and omx.sender_user_id = oj.user_id
		and omx.obj_id = ox.id
		and ox.obj_sub_type_id =` + strconv.FormatInt(cacheObjSubType[`msg_go`], 10) + `
		and omx.date between addtime(oj.schedule, '-30') and oj.schedule;`)
	if len(ids) > 0 { // we did, so we reschedule the job to check the outcome and wait
		m, err := getObjMsg(ids[0])
		logOnError(err, "jobPillage : getMsg(cacheObjSubType[`msg_go`], 10)")
		if err == nil {
			s := TGCommand{
				Type:       commandSendMsg,
				Text:       fmt.Sprintf("We started intercepting the pillage (%s)", m.Date.Format(time.RFC3339)),
				ToUserID64: j.UserID64,
			}
			TGCmdQueue <- s
		}
		err = rescheduleJob(j.ID64, j.Trigger, time.Now().Add(30*time.Second).UTC())
		logOnError(err, "jobPillage : rescheduleJob(cacheObjSubType[`msg_go`], 10)")
	} else { // no /go in the last 30 sec, so we send one ourselves and reschedule to check again in 30 sec
		clientSendCWMsg(j.UserID64, "/go")
		err = rescheduleJob(j.ID64, j.Trigger, time.Now().Add(30*time.Second).UTC())
		logOnError(err, "jobPillage : rescheduleJob")
	}
	return
}
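
// jobMsgRefresh deletes the stored copy of a message and asks the client to
// fetch it again from Telegram so it can be re-parsed.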
func jobMsgRefresh(j Job) {
	var p JobPayloadMsgRefresh

	// identify whether the message has been properly refreshed ? create new job ? reschedule same job ?
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgRefresh : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgRefresh : Unmarshal payload")
	m, err := getObjMsg(p.ObjID64)
	if err != nil && strings.Compare(err.Error(), `sql: no rows in result set`) == 0 {
		err = setJobDone(j.ID64)
		logOnError(err, "jobMsgRefresh : setJobDone")
		return
	} else if err != nil {
		logOnError(err, "jobMsgRefresh : getObjMsg")
		err = setJobDone(j.ID64)
		logOnError(err, "jobMsgRefresh : setJobDone")
		return
	}

	err = delObj(p.ObjID64)
	logOnError(err, "jobMsgRefresh : delObj")

	clientRefreshCWMsg(m.TGUserID64, m.ChatID64, m.ID64)

	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgRefresh : setJobDone")
	return
}

func jobMsgClient(j Job) {
	var p JobPayloadMsgClient
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgClient : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgClient : Unmarshal payload")
	if err == nil {
		clientSendCWMsg(j.UserID64, p.Text)
		m := TGCommand{
			Type:         commandReplyMsg,
			Text:         "Message sent.",
			FromMsgID64:  p.MsgID64,
			FromChatID64: p.ChatID64,
		}
		TGCmdQueue <- m
	}

	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgClient : setJobDone")
	return
}

func jobMsgFwd(j Job) {
	var p JobPayloadMsgFwd
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgFwd : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgFwd : Unmarshal payload")
	msg, err := getObjMsg(j.Trigger)
	logOnError(err, "jobMsgFwd : getObjMsg msg")
	clientFwdCWMsg(j.UserID64, msg.ID64, msg.ChatID64, p.ChatID64)
	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgFwd : setJobDone")
	return
}
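
// jobMsgDelete removes a Telegram message, either the job's trigger (when its
// sub-type matches the payload) or an explicit ObjMsgID64; when a Delay is
// set it re-creates itself as a delayed job_msg_del with the delay cleared so
// the second run performs the actual deletion.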
func jobMsgDelete(j Job) {
	var p JobPayloadMsgDel
	err := setJobStart(j.ID64)
	logOnError(err, "jobMsgDelete : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobMsgDelete : Unmarshal payload")

	b, _ := json.Marshal(p)
	log.Printf("jobMsgDelete[%d] : %d : Payload => %s.\n", j.ID64, j.UserID64, string(b))

	if j.Trigger != 0 && p.MsgTypeID64 != 0 {
		id, err := getObjSubTypeId(j.Trigger)
		logOnError(err, "jobMsgDelete : getObjSubTypeId("+strconv.FormatInt(j.Trigger, 10)+")")
		if id == p.MsgTypeID64 {
			if p.Delay == 0 {
				obj, err := getObjMsg(j.Trigger)
				logOnError(err, "jobMsgDelete : getObjMsg("+strconv.FormatInt(j.Trigger, 10)+")")
				clientDelTGMsg(j.UserID64, obj.ID64, obj.ChatID64)
			} else {
				delay := p.Delay
				p.Delay = 0
				p.ObjMsgID64 = j.Trigger
				b, _ := json.Marshal(p)
				_, err = createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().Add(delay).UTC(), b)
			}
		} else {
			log.Printf("jobMsgDelete : cannot identify msg to delete")
		}
	} else if p.ObjMsgID64 != 0 {
		if p.Delay == 0 {
			obj, err := getObjMsg(p.ObjMsgID64)
			logOnError(err, "jobMsgDelete : getObjMsg("+strconv.FormatInt(p.ObjMsgID64, 10)+")")
			clientDelTGMsg(j.UserID64, obj.ID64, obj.ChatID64)
		} else {
			delay := p.Delay
			p.Delay = 0
			b, _ := json.Marshal(p)
			_, err = createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().Add(delay).UTC(), b)
		}
	}
	err = setJobDone(j.ID64)
	logOnError(err, "jobMsgDelete : setJobDone")
	return
}
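
// jobBackupExport dumps every stored message into backup.json, zips the file
// in memory and sends the archive back as a Telegram document, posting a
// progress message roughly once a minute while reading messages out.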
func jobBackupExport(j Job) {
	var p JobPayloadBackupExport
	err := setJobStart(j.ID64)
	logOnError(err, "jobBackupExport : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobBackupExport : Unmarshal payload")
	bkp := DataBackup{}
	start := time.Now()
	milestone := time.Now()
	var msgs []ChatWarsMessage
	ids := getSQLListID64(`SELECT om.obj_id id FROM obj_msg om;`)
	txt := fmt.Sprintf("Backing up %d messages.", len(ids))
	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         txt,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m

	i := 0
	for _, id := range ids {
		msg, err := getObjMsg(id)
		logOnError(err, "jobBackupExport : getMsg")
		if err == nil {
			msgs = append(msgs, *msg)
		}
		i = i + 1
		if time.Now().After(milestone.Add(1 * time.Minute)) {
			txt := fmt.Sprintf("Exported %d/%d messages.", i, len(ids))
			m = TGCommand{
				Type:         commandReplyMsg,
				Text:         txt,
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
			milestone = time.Now()
		}
	}

	bkp.Messages = msgs
	b, err := json.Marshal(bkp)
	logOnError(err, "jobBackupExport : Marshal")
	m = TGCommand{
		Type:         commandReplyMsg,
		Text:         `Compressing archive`,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	zbuf := new(bytes.Buffer)
	zw := zip.NewWriter(zbuf)
	zf, err := zw.Create(`backup.json`)
	logOnError(err, "jobBackupExport : Create")
	_, err = zf.Write(b)
	logOnError(err, "jobBackupExport : Write")
	err = zw.Close()
	logOnError(err, "jobBackupExport : Close")
	d := tb.Document{}
	d.File = tb.FromReader(bytes.NewReader(zbuf.Bytes()))
	d.FileName = fmt.Sprintf("%s.backup.zip", start.Format("20060102150405"))
	d.Caption = d.FileName
	d.MIME = `application/zip`
	m = TGCommand{
		Type:         commandReplyMsg,
		Text:         `Export done.`,
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	m = TGCommand{
		Type:       commandSendDocument,
		Document:   d,
		ToChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	err = setJobDone(j.ID64)
	logOnError(err, "jobBackupExport : setJobDone")
	return
}
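
// jobBackupImport downloads the zip archive at the payload URL, extracts
// backup.json and replays every message it contains through MQCWMsgQueue.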
func jobBackupImport(j Job) {
	var p JobPayloadBackupImport
	err := setJobStart(j.ID64)
	logOnError(err, "jobBackupImport : setJobStart")

	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobBackupImport : Unmarshal payload")

	resp, err := http.Get(p.URL)
	logOnError(err, "jobBackupImport : Get")
	if err != nil {
		return
	}
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	m := TGCommand{
		Type:         commandReplyMsg,
		Text:         "File downloaded.",
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	z := buf.Bytes()
	r := bytes.NewReader(z)
	zr, err := zip.NewReader(r, int64(len(z)))
	logOnError(err, "jobBackupImport : NewReader")
	if err != nil {
		return
	}
	for _, f := range zr.File {
		if strings.Compare(f.Name, "backup.json") == 0 {
			rc, err := f.Open()
			logOnError(err, "jobBackupImport : Open")
			if err != nil {
				return
			}
			data, err := ioutil.ReadAll(rc)
			logOnError(err, "jobBackupImport : ReadAll")
			if err != nil {
				return
			}
			log.Printf("jobBackupImport : %d uncompressed bytes.\n", len(data))
			rc.Close()
			bkp := DataBackup{}
			err = json.Unmarshal(data, &bkp)
			logOnError(err, "jobBackupImport : Unmarshal")
			if err != nil {
				return
			}
			for _, msg := range bkp.Messages {
				MQCWMsgQueue <- msg
			}
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "Backup restored.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
			}
			TGCmdQueue <- m
			err = setJobDone(j.ID64)
			logOnError(err, "jobBackupImport : setJobDone")
			return
		}
	}
	m = TGCommand{
		Type:         commandReplyMsg,
		Text:         "No backup file found in archive.",
		FromMsgID64:  p.MsgID64,
		FromChatID64: p.ChatID64,
	}
	TGCmdQueue <- m
	err = setJobDone(j.ID64)
	logOnError(err, "jobBackupImport : setJobDone")
	return
}
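
// jobGStock walks the guild stock listing one command at a time: each pass
// (Progress 0-5) sends the next /g_stock_* command and re-registers itself as
// a callback on the ack message, accumulating the parsed stock in the payload;
// pass 6 totals weight and count per item category and replies with a summary.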
func jobGStock(j Job) {
	var p JobPayloadGStock
	var resSize, resCount, alchSize, alchCount, miscSize, miscCount, recSize, recCount, partSize, partCount, otherSize, otherCount, totalSize int64
	err := setJobStart(j.ID64)
	logOnError(err, "jobGStock : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGStock : Unmarshal payload")

	fmt.Printf("jobGStock : Progress => %d\n", p.Progress)
	fmt.Printf("jobGStock : UserID64 => %d\n", j.UserID64)

	switch p.Progress {
	case 0: // send /g_stock_res
		p.Progress = 1
		b, _ := json.Marshal(&p)
		jobID64, err := createJob(cacheObjSubType[`job_gstock`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
		logOnError(err, "jobGStock : createJob")
		setJobCallback(jobID64, j.UserID64, cacheObjSubType[`msg_gstock_any_ack`])
		setJobCallback(jobID64, j.UserID64, cacheObjSubType[`msg_busy`])
		setJobCallback(jobID64, j.UserID64, cacheObjSubType[`msg_battle`])
		err = setJobTimeout(jobID64, 1*time.Minute)
		logOnError(err, "jobGStock : setJobTimeout")
		clientSendCWMsg(j.UserID64, "/g_stock_res")
	case 1: // send /g_stock_alch
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGStock : getObjMsg msg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGStock : getMsgParsingRule")
		if rule.MsgTypeID64 == cacheObjSubType[`msg_gstock_any_ack`] {
			cwm, err := parseSubTypeMessageGStockAnyAck(msg, rule.re)
			for _, v := range cwm.Stock {
				p.Stock = append(p.Stock, v)
			}
			p.Progress = 2
			b, _ := json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gstock`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, 1*time.Minute)
			logOnError(err, "jobGStock : createJobCallback")
			clientSendCWMsg(j.UserID64, "/g_stock_alch")
		} else if rule.MsgTypeID64 == cacheObjSubType[`msg_busy`] || rule.MsgTypeID64 == cacheObjSubType[`msg_battle`] {
			m := TGCommand{
				Type:         commandReplyMsg,
				Text:         "Busy, please retry later.",
				FromMsgID64:  p.MsgID64,
				FromChatID64: p.ChatID64,
				ParseMode:    cmdParseModeHTML,
			}
			TGCmdQueue <- m
		}
	case 2: // send /g_stock_misc
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGStock : getObjMsg msg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGStock : getMsgParsingRule")
		cwm, err := parseSubTypeMessageGStockAnyAck(msg, rule.re)
		for _, v := range cwm.Stock {
			p.Stock = append(p.Stock, v)
		}
		p.Progress = 3
		b, _ := json.Marshal(&p)
		err = createJobCallback(cacheObjSubType[`job_gstock`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, 1*time.Minute)
		logOnError(err, "jobGStock : createJobCallback")
		clientSendCWMsg(j.UserID64, "/g_stock_misc")
	case 3: // send /g_stock_rec
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGStock : getObjMsg msg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGStock : getMsgParsingRule")
		cwm, err := parseSubTypeMessageGStockAnyAck(msg, rule.re)
		for _, v := range cwm.Stock {
			p.Stock = append(p.Stock, v)
		}
		p.Progress = 4
		b, _ := json.Marshal(&p)
		err = createJobCallback(cacheObjSubType[`job_gstock`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, 1*time.Minute)
		logOnError(err, "jobGStock : createJobCallback")
		clientSendCWMsg(j.UserID64, "/g_stock_rec")
	case 4: // send /g_stock_parts
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGStock : getObjMsg msg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGStock : getMsgParsingRule")
		cwm, err := parseSubTypeMessageGStockAnyAck(msg, rule.re)
		for _, v := range cwm.Stock {
			p.Stock = append(p.Stock, v)
		}
		p.Progress = 5
		b, _ := json.Marshal(&p)
		err = createJobCallback(cacheObjSubType[`job_gstock`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, 1*time.Minute)
		logOnError(err, "jobGStock : createJobCallback")
		clientSendCWMsg(j.UserID64, "/g_stock_parts")
	case 5: // send /g_stock_other
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGStock : getObjMsg msg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGStock : getMsgParsingRule")
		cwm, err := parseSubTypeMessageGStockAnyAck(msg, rule.re)
		for _, v := range cwm.Stock {
			p.Stock = append(p.Stock, v)
		}
		p.Progress = 6
		b, _ := json.Marshal(&p)
		err = createJobCallback(cacheObjSubType[`job_gstock`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, 1*time.Minute)
		logOnError(err, "jobGStock : createJobCallback")
		clientSendCWMsg(j.UserID64, "/g_stock_other")
	case 6: // collate everything and reply
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGStock : getObjMsg msg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGStock : getMsgParsingRule")
		cwm, err := parseSubTypeMessageGStockAnyAck(msg, rule.re)
		for _, v := range cwm.Stock {
			p.Stock = append(p.Stock, v)
		}
		for _, v := range p.Stock {
			item, err := getObjItem(v.ItemID64)
			logOnError(err, "jobGStock : getObjItem")
			if err == nil {
				if item.Weight != -1 {
					totalSize += item.Weight * v.Quantity
					switch item.ItemTypeID {
					case cacheObjSubType[`item_res`]:
						resSize += item.Weight * v.Quantity
						resCount += v.Quantity
					case cacheObjSubType[`item_alch`]:
						alchSize += item.Weight * v.Quantity
						alchCount += v.Quantity
					case cacheObjSubType[`item_misc`]:
						miscSize += item.Weight * v.Quantity
						miscCount += v.Quantity
					case cacheObjSubType[`item_recipe`]:
						recSize += item.Weight * v.Quantity
						recCount += v.Quantity
					case cacheObjSubType[`item_part`]:
						partSize += item.Weight * v.Quantity
						partCount += v.Quantity
					case cacheObjSubType[`item_other`]:
						otherSize += item.Weight * v.Quantity
						otherCount += v.Quantity
					}
				} else {
					w := TGCommand{
						Type:       commandSendMsg,
						Text:       fmt.Sprintf("Unknown weight for item : %s - %s\n", item.Code, item.Names[0]),
						ToUserID64: cfg.Bot.Admin,
					}
					TGCmdQueue <- w
				}
			}
		}

		txt := fmt.Sprintf("<code>Current stock [%d/38000] :\n - Resources : %d (%d)\n - Alchemist : %d (%d)\n - Misc stuff : %d (%d)\n - Recipes : %d (%d)\n - Parts : %d (%d)\n - Other : %d (%d)</code>\n", totalSize, resSize, resCount, alchSize, alchCount, miscSize, miscCount, recSize, recCount, partSize, partCount, otherSize, otherCount)

		m := TGCommand{
			Type:         commandReplyMsg,
			Text:         txt,
			FromMsgID64:  p.MsgID64,
			FromChatID64: p.ChatID64,
			ParseMode:    cmdParseModeHTML,
		}
		TGCmdQueue <- m
	}
	err = setJobDone(j.ID64)
	logOnError(err, "jobGStock : setJobDone")
	return
}
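
// jobGDepositForward waits for the /g_deposit request message matching the
// item and quantity in its payload; on a match the triggering message is
// queued in gDepositForwardMsg, otherwise the job re-arms its callback and
// keeps waiting.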
func jobGDepositForward(j Job) {
	var p JobPayloadGDepositForward
	err := setJobStart(j.ID64)
	logOnError(err, "jobGDepositForward : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGDepositForward : Unmarshal payload")
	msg, err := getObjMsg(j.Trigger)
	logOnError(err, "jobGDepositForward : getObjMsg")
	rule, err := getMsgParsingRule(msg)
	logOnError(err, "jobGDepositForward : getMsgParsingRule")
	cwm, err := parseSubTypeMessageGDepositReq(msg, rule.re)
	if cwm.ItemID64 == p.ItemID64 && cwm.Quantity == p.Quantity {
		//log.Printf("jobGDepositForward : match (%d / %d).\n", cwm.ItemID64, cwm.Quantity)
		gDepositForwardMux.Lock()
		gDepositForwardMsg = append(gDepositForwardMsg, j.Trigger)
		gDepositForwardMux.Unlock()
		err = setJobDone(j.ID64)
		logOnError(err, "jobGDepositForward : setJobDone")
	} else {
		//log.Printf("jobGDepositForward : found (%d / %d), expected (%d / %d).\n", cwm.ItemID64, cwm.Quantity, p.ItemID64, p.Quantity)
		err = rescheduleJob(j.ID64, 0, time.Unix(maxUnixTimestamp, 0).UTC())
		logOnError(err, "jobGDepositForward : rescheduleJob")
		setJobCallback(j.ID64, j.UserID64, cacheObjSubType[`msg_g_deposit_req`])
	}
	return
}
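
// jobGDeposit automates guild deposits: on the first pass (Status 0) it opens
// the warehouse sections relevant to the requested items and registers
// stock-ack callbacks; later passes parse the stock answer, send /g_deposit
// for each matching item and chain a jobGDepositForward callback to confirm
// the deposit request.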
func jobGDeposit(j Job) {
	var p JobPayloadGDeposit
	err := setJobStart(j.ID64)
	logOnError(err, "jobGDeposit : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGDeposit : Unmarshal payload")

	if p.Status == 0 { /* handle remaining resources to be stored */
		var res, misc, alch, craft, equip bool = false, false, false, false, false
		var delay time.Duration = 0 * time.Second
		var b []byte
		if len(p.ResObjID64) > 0 {
			for i := range p.ResObjID64 {
				obj, err := getObjItem(p.ResObjID64[i])
				logOnError(err, "jobGDeposit : getObjItem")
				if err == nil {
					switch obj.ItemTypeID {
					case cacheObjSubType[`item_res`]:
						res = true
					case cacheObjSubType[`item_alch`]:
						alch = true
					case cacheObjSubType[`item_misc`]:
						misc = true
					case cacheObjSubType[`item_recipe`]:
						craft = true
					case cacheObjSubType[`item_part`]:
						craft = true
					case cacheObjSubType[`item_other`]:
						equip = true
					case cacheObjSubType[`item_unique`]:
						equip = true
					default:
					}
				}
			}
		}

		if res {
			clientSendCWMsgDelay(p.ChatID64, `📦Resources`, delay)
			p.Status = cacheObjSubType[`msg_stock_ack`]
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_stock_ack`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if alch {
			clientSendCWMsgDelay(p.ChatID64, `Alchemy`, delay)
			p.Status = 1 // FIXME UPDATE WITH PROPER TYPE
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_orderbook_acl`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if misc {
			clientSendCWMsgDelay(p.ChatID64, `🗃Misc`, delay)
			p.Status = 1 // FIXME UPDATE WITH PROPER TYPE
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_orderbook_acl`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if craft {
			clientSendCWMsgDelay(p.ChatID64, `⚒Crafting`, delay)
			p.Status = cacheObjSubType[`msg_stock_any_ack`]
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_stock_any_ack`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}
		if equip {
			clientSendCWMsgDelay(p.ChatID64, `🏷Equipment`, delay)
			p.Status = 1 // FIXME UPDATE WITH PROPER TYPE
			b, _ = json.Marshal(&p)
			err = createJobCallback(cacheObjSubType[`job_gdeposit`], j.UserID64, cacheObjSubType[`msg_orderbook_acl`], b, 1*time.Minute)
			logOnError(err, "jobGDeposit : createJobCallback")
			delay = delay + 2*time.Second
		}

		return
	} else if p.Status == 1 { /* handle that one resource from the cacheObjSubType[`msg_orderbook_acl`] msg */
		log.Printf("jobGDeposit : 1 : %d.\n", j.Trigger)

	} else if p.Status == cacheObjSubType[`msg_stock_ack`] {
		//log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_ack`] : %d.\n", j.Trigger)
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGDeposit : getObjMsg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGDeposit : getMsgParsingRule")
		cwm, err := parseSubTypeMessageStockAck(msg, rule.re)
		for stockIdx := range cwm.Stock {
			for resIdx := range p.ResObjID64 {
				if cwm.Stock[stockIdx].ItemID64 == p.ResObjID64[resIdx] {
					//log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_ack`] : Matching ItemID %d (%d).\n", p.ResObjID64[resIdx], cwm.Stock[stockIdx].Quantity)
					item, _ := getObjItem(p.ResObjID64[resIdx])
					clientSendCWMsg(p.ChatID64, fmt.Sprintf("/g_deposit %s %d", item.Code, cwm.Stock[stockIdx].Quantity))
					p2 := JobPayloadGDepositForward{
						ItemID64: p.ResObjID64[resIdx],
						Quantity: cwm.Stock[stockIdx].Quantity,
					}
					b2, _ := json.Marshal(p2)
					err = createJobCallback(cacheObjSubType[`job_gdeposit_fwd`], j.UserID64, cacheObjSubType[`msg_g_deposit_req`], b2, time.Duration(1*time.Minute))
				}
			}
		}
	} else if p.Status == cacheObjSubType[`msg_stock_any_ack`] {
		log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_any_ack`] : %d.\n", j.Trigger)
		msg, err := getObjMsg(j.Trigger)
		logOnError(err, "jobGDeposit : getObjMsg")
		rule, err := getMsgParsingRule(msg)
		logOnError(err, "jobGDeposit : getMsgParsingRule")
		cwm, err := parseSubTypeMessageStockAnyAck(msg, rule.re)
		for stockIdx := range cwm.Stock {
			for resIdx := range p.ResObjID64 {
				if cwm.Stock[stockIdx].ItemID64 == p.ResObjID64[resIdx] {
					log.Printf("jobGDeposit : cacheObjSubType[`msg_stock_any_ack`] : Matching ItemID %d (%d).\n", p.ResObjID64[resIdx], cwm.Stock[stockIdx].Quantity)
					item, _ := getObjItem(p.ResObjID64[resIdx])
					clientSendCWMsg(p.ChatID64, fmt.Sprintf("/g_deposit %s %d", item.Code, cwm.Stock[stockIdx].Quantity))
					p2 := JobPayloadGDepositForward{
						ItemID64: p.ResObjID64[resIdx],
						Quantity: cwm.Stock[stockIdx].Quantity,
					}
					b2, _ := json.Marshal(p2)
					err = createJobCallback(cacheObjSubType[`job_gdeposit_fwd`], j.UserID64, cacheObjSubType[`msg_g_deposit_req`], b2, time.Duration(1*time.Minute))
				}
			}
		}
	}
	err = setJobDone(j.ID64)
	logOnError(err, "jobGDeposit : setJobDone")
	return
}
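
// jobVaultItemStatus sums deposits and withdrawals per user for the items in
// the payload, based on the obj_msg_vault_v view, and sends one formatted
// table per item to the requesting user.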
func jobVaultItemStatus(j Job) {
	var (
		p                                   JobPayloadVaultItemStatus
		itemID64, currentItemID64           int64
		user, deposit, withdraw             int64
		userList, depositList, withdrawList []int64
	)
	err := setJobStart(j.ID64)
	logOnError(err, "jobVaultItemStatus : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobVaultItemStatus : Unmarshal payload")
	stmt := `SELECT x.item_id
		,x.user_id
		,(SELECT COALESCE(SUM(omv.quantity), 0)
		  FROM obj_msg_vault_v omv
		  WHERE omv.user_id = x.user_id
		  AND omv.item_id = x.item_id
		  AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_g_deposit_ack`], 10) + `
		  AND omv.chat_id = x.chat_id) deposit
		,(SELECT COALESCE(SUM(omv.quantity), 0)
		  FROM obj_msg_vault_v omv
		  WHERE omv.user_id = x.user_id
		  AND omv.item_id = x.item_id
		  AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_withdraw_rcv`], 10) + `
		  AND omv.chat_id = x.chat_id) withdraw
		FROM (SELECT DISTINCT
			omv.user_id
			,omv.chat_id
			,omv.item_id
			FROM obj_msg_vault_v omv
			WHERE omv.chat_id = ?
			AND omv.item_id in (?` + strings.Repeat(",?", len(p.ItemListID64)-1) + `)) x
		ORDER BY x.user_id ASC;`
	args := make([]interface{}, len(p.ItemListID64)+1)
	args[0] = p.DepositChatID64
	for i, id := range p.ItemListID64 {
		args[i+1] = id
	}
	rows, err := db.Query(stmt, args...)
	logOnError(err, "jobVaultItemStatus : Get rows")
	if err != nil {
		err = setJobDone(j.ID64)
		logOnError(err, "jobVaultItemStatus : setJobDone")
		return
	}
	currentItemID64 = 0
	for rows.Next() {
		err = rows.Scan(&itemID64, &user, &deposit, &withdraw)
		logOnError(err, "jobVaultItemStatus : scan next val")
		if itemID64 != currentItemID64 {
			if currentItemID64 != 0 {
				// display info
				out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `User`)
				for i, userId := range userList {
					out = fmt.Sprintf("%s%-32d |%6d |%6d |%6d\n", out, userId, depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
				}
				out = fmt.Sprintf("%s</code>", out)
				c := TGCommand{
					Type:       commandSendMsg,
					Text:       out,
					ToChatID64: p.UserID64,
					ParseMode:  cmdParseModeHTML,
				}
				TGCmdQueue <- c
			}
			currentItemID64 = itemID64
			userList = nil
			depositList = nil
			withdrawList = nil
		}
		userList = append(userList, user)
		depositList = append(depositList, deposit)
		withdrawList = append(withdrawList, withdraw)
	}
	if currentItemID64 != 0 {
		// display info
		out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `User`)
		for i, userId := range userList {
			out = fmt.Sprintf("%s%-32d |%6d |%6d |%6d\n", out, userId, depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
		}
		out = fmt.Sprintf("%s</code>", out)
		c := TGCommand{
			Type:       commandSendMsg,
			Text:       out,
			ToChatID64: p.UserID64,
			ParseMode:  cmdParseModeHTML,
		}
		TGCmdQueue <- c
	}
	err = rows.Err()
	logOnError(err, "jobVaultItemStatus : query end")
	rows.Close()
	err = setJobDone(j.ID64)
	logOnError(err, "jobVaultItemStatus : setJobDone")
	return
}
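
// jobVaultUserStatus is the per-user view of the same data: for the selected
// users and item types it balances deposits against withdrawals and sends one
// item-by-item table per user.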
func jobVaultUserStatus(j Job) {
	var (
		p                                   JobPayloadVaultUserStatus
		userID64, currentUserID64           int64
		itemID64, deposit, withdraw         int64
		itemList, depositList, withdrawList []int64
	)

	err := setJobStart(j.ID64)
	logOnError(err, "jobVaultUserStatus : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobVaultUserStatus : Unmarshal payload")

	stmt := `SELECT x.user_id
		,x.item_id
		,(SELECT COALESCE(SUM(omv.quantity), 0)
		  FROM obj_msg_vault_v omv
		  WHERE omv.user_id = x.user_id
		  AND omv.item_id = x.item_id
		  AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_g_deposit_ack`], 10) + `
		  AND omv.chat_id = x.chat_id) deposit
		,(SELECT COALESCE(SUM(omv.quantity), 0)
		  FROM obj_msg_vault_v omv
		  WHERE omv.user_id = x.user_id
		  AND omv.item_id = x.item_id
		  AND omv.msg_type_id = ` + strconv.FormatInt(cacheObjSubType[`msg_withdraw_rcv`], 10) + `
		  AND omv.chat_id = x.chat_id) withdraw
		FROM (SELECT DISTINCT
			omv.user_id
			,omv.chat_id
			,omv.item_id
			FROM obj_msg_vault_v omv
			WHERE omv.chat_id = ?
			AND omv.user_id IN (?` + strings.Repeat(",?", len(p.UserListID64)-1) + `)
			AND omv.item_type_id IN (?` + strings.Repeat(",?", len(p.ItemTypeListID64)-1) + `)) x
		ORDER BY x.user_id ASC;`

	args := make([]interface{}, len(p.UserListID64)+len(p.ItemTypeListID64)+1)
	args[0] = p.DepositChatID64
	for i, id := range p.UserListID64 {
		args[i+1] = id
	}
	for i, id := range p.ItemTypeListID64 {
		args[i+1+len(p.UserListID64)] = id
	}

	rows, err := db.Query(stmt, args...)
	logOnError(err, "jobVaultUserStatus : Get rows")
	if err != nil {
		err = setJobDone(j.ID64)
		logOnError(err, "jobVaultUserStatus : setJobDone")
		return
	}

	currentUserID64 = 0
	for rows.Next() {
		err = rows.Scan(&userID64, &itemID64, &deposit, &withdraw)
		logOnError(err, "jobVaultUserStatus : scan next val")
		if userID64 != currentUserID64 {
			if currentUserID64 != 0 {
				// display info
				out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `Item`)
				for i, itemId := range itemList {
					item, err := getObjItem(itemId)
					logOnError(err, "jobVaultUserStatus : getObjItem")
					out = fmt.Sprintf("%s%-32s |%6d |%6d |%6d\n", out, item.Names[0], depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
				}
				out = fmt.Sprintf("%s</code>", out)
				c := TGCommand{
					Type:       commandSendMsg,
					Text:       out,
					ToChatID64: p.UserID64,
					ParseMode:  cmdParseModeHTML,
				}
				TGCmdQueue <- c
			}
			currentUserID64 = userID64
			itemList = nil
			depositList = nil
			withdrawList = nil
		}

		itemList = append(itemList, itemID64)
		depositList = append(depositList, deposit)
		withdrawList = append(withdrawList, withdraw)
	}
	if currentUserID64 != 0 {
		// display info
		out := fmt.Sprintf("<code>%-32s | Depo. | Recv. | Total\n────────────────────────────┼──────┼──────┼──────\n", `Item`)
		for i, itemId := range itemList {
			item, err := getObjItem(itemId)
			logOnError(err, "jobVaultUserStatus : getObjItem")
			out = fmt.Sprintf("%s%-32s |%6d |%6d |%6d\n", out, item.Names[0], depositList[i], withdrawList[i], depositList[i]-withdrawList[i])
		}
		out = fmt.Sprintf("%s</code>", out)

		c := TGCommand{
			Type:       commandSendMsg,
			Text:       out,
			ToChatID64: p.UserID64,
			ParseMode:  cmdParseModeHTML,
		}
		TGCmdQueue <- c
	}
	err = rows.Err()
	logOnError(err, "jobVaultUserStatus : query end")
	rows.Close()
	err = setJobDone(j.ID64)
	logOnError(err, "jobVaultUserStatus : setJobDone")
	return
}
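
// jobGWithdraw tracks the /g_stock_* sections it still has to query in a bit
// mask kept in p.Status: reqTab bits mark sections still to request, doneTab
// bits mark sections already handled. Each pass requests one section, arranges
// deletion of the request message and re-registers itself as a callback on the
// stock answer, filling in the available quantities for the requested items.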
2019-10-04 12:38:03 +02:00
func jobGWithdraw(j Job) {
2020-01-06 04:58:46 +01:00
var (
p JobPayloadGWithdraw
reqTab map[int64]int64
doneTab map[int64]int64
)
2020-01-13 09:42:26 +01:00
log.Printf("jobGWithdraw[%d] : Starting.\n", j.ID64)
2020-01-06 04:58:46 +01:00
reqTab = make(map[int64]int64)
reqTab[cacheObjSubType[`item_res`]] = 1 << 0
reqTab[cacheObjSubType[`item_alch`]] = 1 << 1
reqTab[cacheObjSubType[`item_misc`]] = 1 << 2
reqTab[cacheObjSubType[`item_recipe`]] = 1 << 3
reqTab[cacheObjSubType[`item_part`]] = 1 << 4
reqTab[cacheObjSubType[`item_other`]] = 1 << 5
doneTab = make(map[int64]int64)
doneTab[cacheObjSubType[`item_res`]] = 1 << 10
doneTab[cacheObjSubType[`item_alch`]] = 1 << 11
doneTab[cacheObjSubType[`item_misc`]] = 1 << 12
doneTab[cacheObjSubType[`item_recipe`]] = 1 << 13
doneTab[cacheObjSubType[`item_part`]] = 1 << 14
doneTab[cacheObjSubType[`item_other`]] = 1 << 15
2019-10-04 12:38:03 +02:00
err := setJobStart(j.ID64)
logOnError(err, "jobGWithdraw : setJobStart")
err = json.Unmarshal(j.Payload, &p)
logOnError(err, "jobGWithdraw : Unmarshal payload")
2020-01-05 07:23:36 +01:00
if p.Status == 0 {
2020-01-14 04:36:03 +01:00
for _, item := range p.Items {
2020-01-14 04:34:29 +01:00
id := getSilentObjItemID(item.Code, ``)
2020-01-05 07:23:36 +01:00
if id != 0 {
obj, _ := getObjItem(id)
2020-01-06 04:58:46 +01:00
p.Status = p.Status | reqTab[obj.ItemTypeID]
2020-01-14 04:38:59 +01:00
} else if ok, _ := regexp.MatchString(`^u[0-9]+$`, item.Code); ok {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | reqTab[cacheObjSubType[`item_other`]]
2020-01-05 07:23:36 +01:00
}
}
2020-01-13 09:03:01 +01:00
if (p.Status & reqTab[cacheObjSubType[`item_res`]]) == 0 {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | doneTab[cacheObjSubType[`item_res`]]
}
2020-01-13 09:03:01 +01:00
if (p.Status & reqTab[cacheObjSubType[`item_alch`]]) == 0 {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | doneTab[cacheObjSubType[`item_alch`]]
}
2020-01-13 09:03:01 +01:00
if (p.Status & reqTab[cacheObjSubType[`item_misc`]]) == 0 {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | doneTab[cacheObjSubType[`item_misc`]]
}
2020-01-13 09:03:01 +01:00
if (p.Status & reqTab[cacheObjSubType[`item_recipe`]]) == 0 {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | doneTab[cacheObjSubType[`item_recipe`]]
}
2020-01-13 09:03:01 +01:00
if (p.Status & reqTab[cacheObjSubType[`item_part`]]) == 0 {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | doneTab[cacheObjSubType[`item_part`]]
}
2020-01-13 09:03:01 +01:00
if (p.Status & reqTab[cacheObjSubType[`item_other`]]) == 0 {
2020-01-06 04:58:46 +01:00
p.Status = p.Status | doneTab[cacheObjSubType[`item_other`]]
}
2020-01-05 07:23:36 +01:00
}
	if j.Trigger != 0 {
		id, err := getObjSubTypeId(j.Trigger)
		logOnError(err, "jobGWithdraw : getObjSubType("+strconv.FormatInt(j.Trigger, 10)+")")
		if err == nil && id == cacheObjSubType[`msg_gstock_any_ack`] {
			m, err := getObjMsg(j.Trigger)
			logOnError(err, "jobGWithdraw : getObjMsg")
			rule, err := getMsgParsingRule(m)
			logOnError(err, "jobGWithdraw : getMsgParsingRule")
			cwm, err := parseSubTypeMessageGStockAnyAck(m, rule.re)
			logOnError(err, "jobGWithdraw : parseSubTypeMessageGStockAnyAck")

			for k, req := range p.Items {
				for _, disp := range cwm.Stock {
					if req.Code == disp.Code {
						p.Items[k].Available = disp.Quantity
						p.Items[k].Name = disp.Name
						log.Printf("jobGWithdraw[%d] : Found %s - %s : %d.\n", j.ID64, disp.Code, disp.Name, disp.Quantity)
					}
				}
			}

			p2 := JobPayloadMsgDel{
				Delay:      (10 * time.Second),
				ObjMsgID64: j.Trigger,
			}
			b2, _ := json.Marshal(p2)
			createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().UTC(), b2)
		}
	}
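
	// Request the next outstanding category. The matching stock acknowledgement
	// re-triggers this job through the callback, and the /g_stock_* request
	// message is scheduled for deletion shortly afterwards.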
	if (p.Status & reqTab[cacheObjSubType[`item_res`]]) == reqTab[cacheObjSubType[`item_res`]] {
		log.Printf("jobGWithdraw[%d] : Requesting res.\n", j.ID64)
		p.Status = p.Status &^ reqTab[cacheObjSubType[`item_res`]]
		p.Status = p.Status | doneTab[cacheObjSubType[`item_res`]]
		b, _ := json.Marshal(p)
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_gstock_res_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_gwithdraw`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, time.Minute)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		clientSendCWMsg(j.UserID64, `/g_stock_res`)
	} else if (p.Status & reqTab[cacheObjSubType[`item_alch`]]) == reqTab[cacheObjSubType[`item_alch`]] {
		log.Printf("jobGWithdraw[%d] : Requesting alch.\n", j.ID64)
		p.Status = p.Status &^ reqTab[cacheObjSubType[`item_alch`]]
		p.Status = p.Status | doneTab[cacheObjSubType[`item_alch`]]
		b, _ := json.Marshal(p)
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_gstock_alch_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_gwithdraw`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, time.Minute)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		clientSendCWMsg(j.UserID64, `/g_stock_alch`)
	} else if (p.Status & reqTab[cacheObjSubType[`item_misc`]]) == reqTab[cacheObjSubType[`item_misc`]] {
		log.Printf("jobGWithdraw[%d] : Requesting misc.\n", j.ID64)
		p.Status = p.Status &^ reqTab[cacheObjSubType[`item_misc`]]
		p.Status = p.Status | doneTab[cacheObjSubType[`item_misc`]]
		b, _ := json.Marshal(p)
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_gstock_misc_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_gwithdraw`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, time.Minute)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		clientSendCWMsg(j.UserID64, `/g_stock_misc`)
	} else if (p.Status & reqTab[cacheObjSubType[`item_recipe`]]) == reqTab[cacheObjSubType[`item_recipe`]] {
		log.Printf("jobGWithdraw[%d] : Requesting recipe.\n", j.ID64)
		p.Status = p.Status &^ reqTab[cacheObjSubType[`item_recipe`]]
		p.Status = p.Status | doneTab[cacheObjSubType[`item_recipe`]]
		b, _ := json.Marshal(p)
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_gstock_rec_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_gwithdraw`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, time.Minute)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		clientSendCWMsg(j.UserID64, `/g_stock_rec`)
	} else if (p.Status & reqTab[cacheObjSubType[`item_part`]]) == reqTab[cacheObjSubType[`item_part`]] {
		log.Printf("jobGWithdraw[%d] : Requesting part.\n", j.ID64)
		p.Status = p.Status &^ reqTab[cacheObjSubType[`item_part`]]
		p.Status = p.Status | doneTab[cacheObjSubType[`item_part`]]
		b, _ := json.Marshal(p)
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_gstock_part_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_gwithdraw`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, time.Minute)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		clientSendCWMsg(j.UserID64, `/g_stock_part`)
	} else if (p.Status & reqTab[cacheObjSubType[`item_other`]]) == reqTab[cacheObjSubType[`item_other`]] {
		log.Printf("jobGWithdraw[%d] : Requesting other.\n", j.ID64)
		p.Status = p.Status &^ reqTab[cacheObjSubType[`item_other`]]
		p.Status = p.Status | doneTab[cacheObjSubType[`item_other`]]
		b, _ := json.Marshal(p)
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_gstock_oth_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_gwithdraw`], j.UserID64, cacheObjSubType[`msg_gstock_any_ack`], b, time.Minute)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		clientSendCWMsg(j.UserID64, `/g_stock_other`)
	} else {
		/*
			c, err := getLockedRoleClient(`commander`)
			logOnError(err, "jobGWithdraw: getLockedRoleClient(commander)")
			if err == nil {
				c.Mux.Unlock()
			}
		*/

		b, _ := json.Marshal(p)
		id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
		logOnError(err, "jobGWithdraw : createJob")
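
		// The confirmation reference embeds the new job id and the requesting
		// user id: both are packed little-endian into a single 16-byte block and
		// encrypted with an AES key derived from the bot token, so the value in
		// the /withdraw_<ref> command stays opaque to anyone without the token.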
		hash := sha256.Sum256([]byte(cfg.Telegram.Token))
		key := hash[:aes.BlockSize]
		c, err := aes.NewCipher(key)
		logOnError(err, "jobGWithdraw : aes.NewCipher")

		in := make([]byte, 0)
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, uint64(id))
		in = append(in, buf...)
		binary.LittleEndian.PutUint64(buf, uint64(j.UserID64))
		in = append(in, buf...)

		out := make([]byte, len(in))
		c.Encrypt(out, in)

		ref := hex.EncodeToString(out)

		b, err = json.Marshal(p)
		logOnError(err, "jobGWithdraw : Marshal payload")
		fmt.Printf("jobGWithdraw : %s\n", string(b))
		var stock string
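
		// Build the confirmation list, never offering more than the stock has available.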
		for _, i := range p.Items {
			if i.Available > i.Required {
				stock = fmt.Sprintf("%s\n%d x %s", stock, i.Required, i.Name)
			} else {
				stock = fmt.Sprintf("%s\n%d x %s", stock, i.Available, i.Name)
			}
		}

		msg := fmt.Sprintf("To validate @%s withdrawal of <code>%s</code>\nClick /withdraw_%s", p.User, stock, ref)

		cmd := TGCommand{
			Type:         commandReplyMsg,
			Text:         msg,
			FromMsgID64:  p.MsgID64,
			FromChatID64: p.ChatID64,
			ParseMode:    cmdParseModeHTML,
		}
		TGCmdQueue <- cmd
	}
	err = setJobDone(j.ID64)
	logOnError(err, "jobGWithdraw : setJobDone")
	return
}
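
// decodeWithdrawRef is a minimal sketch, not part of the original job flow: it
// shows how a /withdraw_<ref> reference produced above could be mapped back to
// the (job id, user id) pair it encodes, assuming the same token-derived AES
// key. The function name and its use by any command handler are assumptions.
func decodeWithdrawRef(ref string) (int64, int64, error) {
	raw, err := hex.DecodeString(ref)
	if err != nil {
		return 0, 0, err
	}
	if len(raw) != aes.BlockSize {
		return 0, 0, errors.New("decodeWithdrawRef : unexpected reference length")
	}
	hash := sha256.Sum256([]byte(cfg.Telegram.Token))
	c, err := aes.NewCipher(hash[:aes.BlockSize])
	if err != nil {
		return 0, 0, err
	}
	out := make([]byte, aes.BlockSize)
	c.Decrypt(out, raw)
	// The encrypt side packs the job id first, then the user id, both little-endian.
	jobID64 := int64(binary.LittleEndian.Uint64(out[0:8]))
	userID64 := int64(binary.LittleEndian.Uint64(out[8:16]))
	return jobID64, userID64, nil
}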
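
// jobSetDef reacts to a /me acknowledgement: if the parsed state shows the
// player resting, it sends the Defend order on their behalf.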
func jobSetDef(j Job) {
	var p JobPayloadSetDef
	err := setJobStart(j.ID64)
	logOnError(err, "jobSetDef : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobSetDef : Unmarshal payload")

	msg, err := getObjMsg(j.Trigger)
	logOnError(err, "jobSetDef : getObjMsg msg")

	rule, err := getMsgParsingRule(msg)
	logOnError(err, "jobSetDef : getMsgParsingRule")
	cwm, err := parseSubTypeMessageMeAck(msg, rule.re)
	logOnError(err, "jobSetDef : parseSubTypeMessageMeAck")
	if cwm.State == `🛌Rest` {
		clientSendCWMsg(j.UserID64, `🛡Defend`)
	}

	err = setJobDone(j.ID64)
	logOnError(err, "jobSetDef : setJobDone")
	return
}
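
// jobGetHammerTime parses a time/weather acknowledgement and, when current
// conditions are right, announces in the main chat how long the window should
// last (2 hours, possibly 4 when the next forecast also qualifies).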
func jobGetHammerTime(j Job) {
	var p JobPayloadSetDef
	err := setJobStart(j.ID64)
	logOnError(err, "jobGetHammerTime : setJobStart")
	err = json.Unmarshal(j.Payload, &p)
	logOnError(err, "jobGetHammerTime : Unmarshal payload")
	msg, err := getObjMsg(j.Trigger)
	logOnError(err, "jobGetHammerTime : getObjMsg msg")
	rule, err := getMsgParsingRule(msg)
	logOnError(err, "jobGetHammerTime : getMsgParsingRule")
	cwm, err := parseSubTypeMessageTimeAck(msg, rule.re)
	logOnError(err, "jobGetHammerTime : parseSubTypeMessageTimeAck")

	out := ``
	if hammerTimeNow(cwm.TimeOfDay, cwm.Weather) {
		if hammerTimeNext(cwm.TimeOfDay, cwm.WeatherNext) ||
			hammerTimeNext(cwm.TimeOfDay, cwm.Weather) {
			out = `Perfect weather for the next 2 hours, possibly 4.`
		} else {
			out = `Perfect weather only for the next 2 hours.`
		}

		c := TGCommand{
			Type:       commandSendMsg,
			Text:       out,
			ToChatID64: cfg.Bot.Mainchat,
			ParseMode:  cmdParseModeHTML,
		}
		TGCmdQueue <- c
	}

	/*
		} else {
			if hammerTimeNext(cwm.TimeOfDay, cwm.WeatherNext) ||
				hammerTimeNext(cwm.TimeOfDay, cwm.Weather) {
				out = `Perfect weather maybe in 2 hours.`
			} else {
				out = `No perfect weather in sight for the next 4 hours.`
			}
		}
	*/

	err = setJobDone(j.ID64)
	logOnError(err, "jobGetHammerTime : setJobDone")
	return
}