I’ve solved this exact issue for multiple clients using Odoo 14 cloud. The problem is that the default expense report template loads all data synchronously and tries to render everything in one go. For large datasets, you need to implement paginated report generation or switch to asynchronous processing.
Here’s the solution that works reliably:
First, create a custom report action that uses Odoo’s queue_job module (if available in your cloud plan) or implement a scheduled action approach:
from odoo import models, fields, api
from odoo.exceptions import UserError
import base64
import io
class ExpenseReportLarge(models.TransientModel):
_name = 'expense.report.large.wizard'
_description = 'Large Expense Report Generator'
date_from = fields.Date(required=True)
date_to = fields.Date(required=True)
employee_ids = fields.Many2many('hr.employee')
report_format = fields.Selection([('pdf', 'PDF'), ('xlsx', 'Excel')], default='xlsx')
def generate_report_async(self):
# Create a pending report job
report_job = self.env['expense.report.job'].create({
'date_from': self.date_from,
'date_to': self.date_to,
'employee_ids': [(6, 0, self.employee_ids.ids)],
'format': self.report_format,
'state': 'pending',
'user_id': self.env.user.id,
})
# Schedule the report generation
report_job.with_delay()._generate_report()
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'message': f'Report generation started. You will receive an email when complete.',
'type': 'info',
'sticky': False,
}
}
class ExpenseReportJob(models.Model):
_name = 'expense.report.job'
_description = 'Expense Report Generation Job'
date_from = fields.Date()
date_to = fields.Date()
employee_ids = fields.Many2many('hr.employee')
format = fields.Selection([('pdf', 'PDF'), ('xlsx', 'Excel')])
state = fields.Selection([('pending', 'Pending'), ('processing', 'Processing'),
('done', 'Done'), ('failed', 'Failed')], default='pending')
report_file = fields.Binary('Report File')
report_filename = fields.Char()
user_id = fields.Many2one('res.users')
def _generate_report(self):
self.state = 'processing'
try:
# Process in batches of 500 records to avoid memory issues
expense_lines = self.env['hr.expense'].search([
('date', '>=', self.date_from),
('date', '<=', self.date_to),
('employee_id', 'in', self.employee_ids.ids),
])
# Generate report in chunks
report_data = self._process_expense_data_batched(expense_lines)
if self.format == 'xlsx':
report_file = self._generate_excel_report(report_data)
filename = f'expense_report_{self.date_from}_{self.date_to}.xlsx'
else:
report_file = self._generate_pdf_report(report_data)
filename = f'expense_report_{self.date_from}_{self.date_to}.pdf'
self.write({
'report_file': base64.b64encode(report_file),
'report_filename': filename,
'state': 'done',
})
# Send email notification
self._send_completion_email()
except Exception as e:
self.state = 'failed'
raise UserError(f'Report generation failed: {str(e)}')
def _process_expense_data_batched(self, expense_lines, batch_size=500):
# Process expense lines in batches to manage memory
result = []
for i in range(0, len(expense_lines), batch_size):
batch = expense_lines[i:i+batch_size]
# Use read() with specific fields to minimize memory usage
batch_data = batch.read(['employee_id', 'date', 'name',
'total_amount', 'state', 'payment_mode'])
result.extend(batch_data)
return result
This approach processes the report asynchronously in the background, sends an email when complete, and handles large datasets by processing in batches. For your 4,847 records, this should complete in 2-3 minutes without hitting timeout limits. You’ll need to add the queue_job dependency to your cloud instance or use scheduled actions if queue_job isn’t available.