if (chunkProcessorG) then
    return chunkProcessorG
end
local chunkProcessor = {}
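
-- ChunkProcessor consumes the queued chunk work for each map: newly
-- generated chunk events, pending entity upgrades, and pass rescans.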

-- imports

local chunkUtils = require("ChunkUtils")
local constants = require("Constants")

-- constants

local CHUNK_SIZE = constants.CHUNK_SIZE

-- imported functions

local registerEnemyBaseStructure = chunkUtils.registerEnemyBaseStructure
local unregisterEnemyBaseStructure = chunkUtils.unregisterEnemyBaseStructure

local createChunk = chunkUtils.createChunk
local initialScan = chunkUtils.initialScan
local chunkPassScan = chunkUtils.chunkPassScan

local next = next
local table_size = table_size

local tRemove = table.remove
local tInsert = table.insert

local mCeil = math.ceil

-- module code
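
-- Binary search over processQueue, which is kept sorted by ascending
-- dOrigin; returns the index at which the given chunk should be inserted
-- to preserve that ordering.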
local function findInsertionPoint(processQueue, chunk)
    local low = 1
    local high = #processQueue
    local pivot
    while (low <= high) do
        pivot = mCeil((low + high) * 0.5)
        local pivotChunk = processQueue[pivot]
        if (pivotChunk.dOrigin > chunk.dOrigin) then
            high = pivot - 1
        elseif (pivotChunk.dOrigin <= chunk.dOrigin) then
            low = pivot + 1
        end
    end
    return low
end
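
-- Removes chunk from processQueue by finding its sorted insertion point and
-- scanning backwards from there for entries with a matching chunk id.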
local function removeProcessQueueChunk(processQueue, chunk)
    local insertionPoint = findInsertionPoint(processQueue, chunk)
    for i=insertionPoint,1,-1 do
        if (processQueue[i].id == chunk.id) then
            tRemove(processQueue, i)
        end
    end
end
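
-- Walks universe.pendingChunks starting from universe.chunkProcessorIterator.
-- Each pending event either rescans the chunk already stored at its position
-- or creates and scans a new one, inserting usable chunks into the map's
-- sorted processQueue; with flush set, the entire queue is drained regardless
-- of each event's tick.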
function chunkProcessor.processPendingChunks(universe, tick, flush)
    local area = universe.area
    local topOffset = area[1]
    local bottomOffset = area[2]

    local pendingChunks = universe.pendingChunks
    local eventId = universe.chunkProcessorIterator
    local event

    if not eventId then
        eventId, event = next(pendingChunks, nil)
    else
        event = pendingChunks[eventId]
    end
    local endCount = 1
    if flush then
        endCount = table_size(pendingChunks)
        eventId, event = next(pendingChunks, nil)
    end
    for _=1,endCount do
        if not eventId then
            universe.chunkProcessorIterator = nil
            if (table_size(pendingChunks) == 0) then
                -- this is needed as the next command remembers the max length a table has been
                universe.pendingChunks = {}
            end
            break
        else
            if not flush and (event.tick > tick) then
                universe.chunkProcessorIterator = eventId
                return
            end
            local newEventId, newEvent = next(pendingChunks, eventId)
            pendingChunks[eventId] = nil
            local map = event.map
            if not map.surface.valid then
                return
            end

            local topLeft = event.area.left_top
            local x = topLeft.x
            local y = topLeft.y

            topOffset[1] = x
            topOffset[2] = y
            bottomOffset[1] = x + CHUNK_SIZE
            bottomOffset[2] = y + CHUNK_SIZE

            if not map[x] then
                map[x] = {}
            end

            if map[x][y] then
                local oldChunk = map[x][y]
                local chunk = initialScan(oldChunk, map, tick)
                if (chunk == -1) then
                    removeProcessQueueChunk(map.processQueue, oldChunk)
                    universe.chunkIdToChunk[oldChunk.id] = nil
                    map[x][y] = nil
                end
            else
                local initialChunk = createChunk(map, x, y)
                map[x][y] = initialChunk
                universe.chunkIdToChunk[initialChunk.id] = initialChunk
                local chunk = initialScan(initialChunk, map, tick)
                if (chunk ~= -1) then
                    tInsert(
                        map.processQueue,
                        findInsertionPoint(map.processQueue, chunk),
                        chunk
                    )
                else
                    universe.chunkIdToChunk[initialChunk.id] = nil
                    map[x][y] = nil
                end
            end

            eventId = newEventId
            event = newEvent
        end
    end
    universe.chunkProcessorIterator = eventId
end
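
-- Processes one queued entity upgrade per call from universe.pendingUpgrades:
-- the old entity is unregistered and destroyed, a replacement is created from
-- universe.upgradeEntityQuery and re-registered, and creep is spawned at the
-- position when a "kr-creep" remote interface is available.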
function chunkProcessor.processPendingUpgrades(universe, tick)
    local entityId = universe.pendingUpgradeIterator
    local entityData
    if not entityId then
        entityId, entityData = next(universe.pendingUpgrades, nil)
    else
        entityData = universe.pendingUpgrades[entityId]
    end
    if not entityId then
        universe.pendingUpgradeIterator = nil
        if table_size(universe.pendingUpgrades) == 0 then
            universe.pendingUpgrades = {}
        end
    else
        local entity = entityData.entity
        if entity.valid then
            universe.pendingUpgradeIterator = next(universe.pendingUpgrades, entityId)
            universe.pendingUpgrades[entityId] = nil
            local surface = entity.surface
            local query = universe.upgradeEntityQuery
            query.position = entityData.position or entity.position
            query.name = entityData.name
            unregisterEnemyBaseStructure(entityData.map, entity)
            entity.destroy()
            local createdEntity = surface.create_entity(query)
            if createdEntity and createdEntity.valid then
                registerEnemyBaseStructure(entityData.map, createdEntity, tick, entityData.base)
                if remote.interfaces["kr-creep"] then
                    remote.call("kr-creep", "spawn_creep_at_position", surface, query.position)
                end
            end
        else
            universe.pendingUpgradeIterator = next(universe.pendingUpgrades, entityId)
            universe.pendingUpgrades[entityId] = nil
        end
    end
end
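
-- Processes one entry per call from map.chunkToPassScan; chunks that
-- chunkPassScan marks as unusable (-1) are removed from the processQueue,
-- the map grid, and the chunk id lookup.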
function chunkProcessor.processScanChunks(map)
    local chunkId = map.chunkToPassScanIterator
    local chunk
    if not chunkId then
        chunkId, chunk = next(map.chunkToPassScan, nil)
    else
        chunk = map.chunkToPassScan[chunkId]
    end

    if not chunkId then
        map.chunkToPassScanIterator = nil
        if (table_size(map.chunkToPassScan) == 0) then
            -- this is needed as the next command remembers the max length a table has been
            map.chunkToPassScan = {}
        end
    else
        map.chunkToPassScanIterator = next(map.chunkToPassScan, chunkId)
        map.chunkToPassScan[chunkId] = nil

        local area = map.universe.area
        local topOffset = area[1]
        local bottomOffset = area[2]
        topOffset[1] = chunk.x
        topOffset[2] = chunk.y
        bottomOffset[1] = chunk.x + CHUNK_SIZE
        bottomOffset[2] = chunk.y + CHUNK_SIZE

        if (chunkPassScan(chunk, map) == -1) then
            removeProcessQueueChunk(map.processQueue, chunk)
            map[chunk.x][chunk.y] = nil
            map.universe.chunkIdToChunk[chunk.id] = nil
        end
    end
end

chunkProcessorG = chunkProcessor
return chunkProcessor