Cannot download CSV correctly

Hi,
I'm trying to build a web app where a user enters a list of websites and the results show, for each site, the website status, the page title, a short summary, any email addresses and phone numbers, and Facebook and Instagram links if present. At the end the user can download the results as a CSV.
I tried a first batch of websites, downloaded the CSV, and it worked fine. Then I tried a different batch: when I downloaded the CSV, I still got the results from the previous attempt.
I thought it might be a session issue, but I really can't figure out what the problem is.
Now, with the session-based approach shown below, clicking "Download CSV" gives me "No data to download".
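
For what it's worth, my understanding is that the db-backed session store can be inspected from "python manage.py shell" to see whether the data was ever saved (a minimal sketch; the session key is the value of the browser's sessionid cookie, and the placeholder below needs to be filled in):

from django.contrib.sessions.models import Session

# Sketch: decode the stored session row for my browser's session key
s = Session.objects.get(session_key='<value of the sessionid cookie>')
print(s.get_decoded())  # should include {'websites_data': [...]} if it was ever saved
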
Here is my views file (a couple of sanity checks on its helpers follow right after the listing):

# website_checker/checker/views.py

from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
import requests
from bs4 import BeautifulSoup
import spacy
import re
import csv
import io

# Load the spaCy model once at import time; reloading it on every call is slow
nlp = spacy.load('en_core_web_sm')

# Function to get a business summary from the text
def get_business_summary(text):
    doc = nlp(text)
    sentences = [sent.text.strip() for sent in doc.sents]
    business_summary = ''
    for sent in sentences:
        # You can add more conditions to extract business-specific information from the text
        if 'business' in sent.lower() or 'company' in sent.lower():
            business_summary = sent
            break
    return business_summary



# Function to extract emails from the text
def extract_emails(text):
    # Use regex pattern for email extraction
    email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
    emails = set(re.findall(email_pattern, text))  # Use set to eliminate duplicates
    return list(emails)

# Function to extract phone numbers from the text
def extract_phones(text):
    phone_pattern = re.compile(r'(\+?\d{1,3}[-.\s]?)?(\()?\d{3}(\))?[-.\s]?\d{3}[-.\s]?\d{4}')

    phones = set()
    for match in phone_pattern.finditer(text):
        phone = match.group(0).replace('(', '').replace(')', '').replace('-', '').replace(' ', '').replace('.', '')
        phones.add(phone)

    return list(phones) if phones else ['No phones found']

# Function to extract Facebook and Instagram links from the website
def extract_social_media_links(soup):
    facebook_links = []
    instagram_links = []

    # Find all anchor tags with href attributes
    anchor_tags = soup.find_all('a', href=True)

    for tag in anchor_tags:
        href = tag['href']
        if 'facebook.com' in href:
            facebook_links.append(href)
        elif 'instagram.com' in href:
            instagram_links.append(href)

    # Return the links as lists
    return facebook_links, instagram_links

# Actual implementation of generate_csv function to convert websites_data into CSV format
def generate_csv(websites_data):
    # Prepare CSV data
    csv_data = io.StringIO()  # Create a StringIO object to hold CSV data
    fieldnames = ['Website', 'Status', 'Title', 'Description', 'Business Summary', 'Emails', 'Phones', 'Facebook', 'Instagram']

    # Use DictWriter to write the CSV data
    writer = csv.DictWriter(csv_data, fieldnames=fieldnames)
    writer.writeheader()  # Write the header row

    for data in websites_data:
        # Create a new dictionary with the required fieldnames to avoid extra fields in the CSV
        row_data = {
            'Website': data['url'],
            'Status': data['status'],  # Use 'status' instead of 'is_down'
            'Title': data['title'],
            'Description': data['description'],
            'Business Summary': data['business_summary'],
            'Emails': ', '.join(data['emails']) if data['emails'] else 'No emails found',
            'Phones': ', '.join(data['phones']) if data['phones'] else 'No phones found',
            'Facebook': ', '.join(data['facebook_links']) if data['facebook_links'] else 'No Facebook links found',
            'Instagram': ', '.join(data['instagram_links']) if data['instagram_links'] else 'No Instagram links found',
        }

        # Write the data row
        writer.writerow(row_data)

    print("CSV Data:", csv_data.getvalue())  # Debugging line
    return csv_data.getvalue()




def home(request):
    if request.method == 'POST':
        website_urls = request.POST.get('website_urls', '').strip()
        urls_list = website_urls.splitlines()
        # Remove empty strings from the list
        urls_list = list(filter(None, urls_list))

        print("Request Method:", request.method)  # Debugging line
        print("Website URLs:", urls_list)  # Debugging line

        websites_data = []
        for url in urls_list:
            try:
                response = requests.get(url, timeout=10)  # timeout so one slow site doesn't hang the whole batch
                is_down = response.status_code != 200
                soup = BeautifulSoup(response.content, 'html.parser')

                if soup:
                    # Check if the title tag exists
                    title = soup.title
                    if title:
                        title = title.string.strip() if title.string else 'No title available'
                    else:
                        title = 'No title available'

                    # Check if the description meta tag exists
                    description_tag = soup.find('meta', attrs={'name': 'description'})
                    description = description_tag.get('content', 'No description available').strip() if description_tag else 'No description available'

                    # Get the website content for NLP processing
                    website_text = soup.get_text()

                    # Get a brief business summary
                    business_summary = get_business_summary(website_text)

                    # Extract emails using regex pattern
                    emails = extract_emails(website_text)

                    # Extract phone numbers using regex pattern
                    phones = extract_phones(website_text)

                    # Extract Facebook and Instagram links from the website
                    facebook_links, instagram_links = extract_social_media_links(soup)
                    # Remove duplicates from Facebook and Instagram links
                    facebook_links = list(set(facebook_links))
                    instagram_links = list(set(instagram_links))
                else:
                    is_down = True
                    title = 'No title available'
                    description = 'No description available'
                    business_summary = 'Unable to retrieve website content.'
                    emails = []
                    phones = []
                    facebook_links = []
                    instagram_links = []

            except requests.exceptions.RequestException:
                is_down = True
                title = 'No title available'
                description = 'No description available'
                business_summary = 'Unable to retrieve website content.'
                emails = []
                phones = []
                facebook_links = []
                instagram_links = []

            # Check the status and set 'UP' or 'Down' accordingly
            status = 'UP' if not is_down else 'Down'
            websites_data.append({
                'url': url,
                'is_down': is_down,
                'title': title,
                'description': description,
                'business_summary': business_summary,
                'emails': emails,
                'phones': phones,
                'facebook_links': facebook_links,
                'instagram_links': instagram_links,
                'status': status,
            })

        # Check if the request is for CSV download
        if 'download_csv' in request.POST:
            # Save websites_data in the session
            request.session['websites_data'] = websites_data
            # Generate the URL for the download view using reverse
            download_url = reverse('download_csv')
            # Redirect to the download view
            return HttpResponseRedirect(download_url)

        # For normal POST request, render the result table
        print("Websites Data:", websites_data)  # Debugging line
        return render(request, 'checker/home.html', {'websites_data': websites_data})

    # For GET request, display the form to enter website URLs
    return render(request, 'checker/home.html')



def download_csv(request):
    if request.method == 'POST':
        websites_data = request.session.get('websites_data')
        if websites_data:
            # Prepare CSV data
            csv_data = generate_csv(websites_data)

            # Clear the session data
            del request.session['websites_data']

            # Create and return the CSV response
            response = HttpResponse(csv_data, content_type='text/csv')
            response['Content-Disposition'] = 'attachment; filename="websites_data.csv"'
            return response
        else:
            return HttpResponse("No data to download.")
    else:
        return HttpResponse("Invalid request method for CSV download.")

This is my settings file:

"""
Django settings for website_checker project.

Generated by 'django-admin startproject' using Django 4.2.3.

For more information on this file, see
https://docs.djangoproject.com/en/4.2/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.2/ref/settings/
"""

from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ah@v1i=%%uwlpa&qm=k&ru!@6c#ys2w#o3)5=p^zd3u=@c#0wz'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.db'


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'checker',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'website_checker.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'website_checker.wsgi.application'


# Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/4.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/

STATIC_URL = 'static/'

# Default primary key field type
# https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

This is my home.html file:

{% extends 'checker/base.html' %}

{% block content %}
    <h1>Website Checker</h1>
    <form method="post" action="{% url 'home' %}">
      {% csrf_token %}
      <textarea name="website_urls" rows="10" cols="50" placeholder="Enter website URLs"></textarea>
      <br>
      <input type="submit" value="Check Websites">
      <!-- Move the "Download CSV" button into the same form -->
      <input type="submit" name="download_csv" value="Download CSV">
    </form>

    <!-- Display the result table if available -->
    {% if websites_data %}
        <table>
            <thead>
                <tr>
                    <th>Website</th>
                    <th>Status</th>
                    <th>Title</th>
                    <th>Description</th>
                    <th>Business Summary</th>
                    <th>Emails</th>
                    <th>Phones</th>
                    <th>Facebook</th>
                    <th>Instagram</th>
                </tr>
            </thead>
            <tbody>
                {% for data in websites_data %}
                    <tr>
                        <td>{{ data.url }}</td>
                        <td>{{ data.status }}</td> <!-- Use 'status' instead of 'is_down' -->
                        <td>{{ data.title }}</td>
                        <td>{{ data.description }}</td>
                        <td>{{ data.business_summary }}</td>
                        <td>{{ data.emails|join:", " }}</td>
                        <td>{{ data.phones|join:", " }}</td>
                        <td>{{ data.facebook_links|join:", " }}</td>
                        <td>{{ data.instagram_links|join:", " }}</td>
                    </tr>
                {% endfor %}
            </tbody>
        </table>

<!-- Add the "Download CSV" form -->
<form method="post" action="{% url 'download_csv' %}">
    {% csrf_token %}
    <input type="hidden" name="download_csv" value="1">
    <input type="submit" value="Download CSV">
</form>
    {% endif %}
{% endblock %}

This is my results file:

{% extends 'checker/base.html' %}

{% block content %}
  <h1>Website Checker - Results</h1>
  <table>
    <thead>
      <tr>
        <th>Website</th>
        <th>Status</th>
        <th>Title</th>
        <th>Description</th>
        <th>Business Summary</th>
        <th>Emails</th>
        <th>Phones</th>
        <th>Facebook</th>
        <th>Instagram</th>
      </tr>
    </thead>
    <tbody>
      {% for website_data in websites_data %}
        <tr>
          <td>{{ website_data.url }}</td>
          <td>{{ website_data.status }}</td> <!-- Use 'status' instead of 'is_down' -->
          <td>{{ website_data.title }}</td>
          <td>{{ website_data.description }}</td>
          <td>{{ website_data.business_summary }}</td>
          <td>{{ website_data.emails|join:", " }}</td>
          <td>{{ website_data.phones|join:", " }}</td>
          <td>{{ website_data.facebook_links|join:", " }}</td>
          <td>{{ website_data.instagram_links|join:", " }}</td>
        </tr>
      {% endfor %}
    </tbody>
  </table>
  <form method="post">
    {% csrf_token %}
    <input type="hidden" name="website_urls" value="{{ website_urls }}">
    <input type="submit" name="download_csv" value="Download CSV">
  </form>
{% endblock %}

And this is my urls file:

# website_checker/checker/urls.py

from django.urls import path
from checker import views

urlpatterns = [
    path('', views.home, name='home'),  # This is the home view for entering website URLs
    path('download_csv/', views.download_csv, name='download_csv'),  # This is the CSV download view
]

Any idea what's going on?
Thanks!