import pytest
import numpy as np
import torch
from ding.rl_utils import ppo_policy_data, ppo_value_data, ppo_policy_error, ppo_value_error


@pytest.fixture
def batch_size():
    return 4


@pytest.fixture
def seq_length():
    return 8


@pytest.fixture
def dictionary_num():
    return 1000
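# Shape conventions used throughout these tests:
#   logits:                                  (batch_size, seq_length, dictionary_num)
#   actions / advantages / masks / values / returns: (batch_size, seq_length)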
@pytest.mark.unittest
def test_policy_loss_without_mask(batch_size: int, seq_length: int, dictionary_num: int):
    # Create test data: new-policy logits (with gradient), slightly perturbed old-policy
    # logits (detached, since the old policy is treated as fixed), actions and advantages
    logit_new = torch.randn(batch_size, seq_length, dictionary_num).requires_grad_(True)
    logit_old = (logit_new + torch.randn_like(logit_new) * 0.1).detach()
    action = torch.randint(0, dictionary_num, (batch_size, seq_length))
    advantages = torch.randn(batch_size, seq_length)

    # Compute loss
    data = ppo_policy_data(logit_new, logit_old, action, advantages, weight=None)
    loss, info = ppo_policy_error(data, clip_ratio=0.2, entropy_bonus=False)

    # Verify output: the loss is a finite scalar, gradients flow back to logit_new,
    # and every statistic in info is a Python scalar
    assert isinstance(loss.policy_loss, torch.Tensor)
    assert loss.policy_loss.shape == torch.Size([])  # scalar
    assert not torch.isnan(loss.policy_loss)
    assert not torch.isinf(loss.policy_loss)
    assert logit_new.grad is None
    loss.policy_loss.backward()
    assert isinstance(logit_new.grad, torch.Tensor)
    assert all(np.isscalar(i) for i in info)
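# For reference only: a minimal sketch of the clipped surrogate objective these tests
# exercise, assuming the standard PPO formulation
#   L = -E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)],  r_t = pi_new(a|s) / pi_old(a|s).
# It is not DI-engine's implementation (which also reports entropy and clip statistics),
# and the helper name below is hypothetical; it is not used by the tests.
def _reference_clipped_policy_loss(logit_new, logit_old, action, adv, clip_ratio=0.2):
    dist_new = torch.distributions.Categorical(logits=logit_new)
    dist_old = torch.distributions.Categorical(logits=logit_old.detach())
    # Importance ratio between new and old policies for the taken actions
    ratio = torch.exp(dist_new.log_prob(action) - dist_old.log_prob(action))
    surr1 = ratio * adv
    surr2 = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv
    # Negative sign: the surrogate objective is maximized, so the loss is its negation
    return -torch.min(surr1, surr2).mean()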
@pytest.mark.unittest
def test_policy_loss_with_mask(batch_size: int, seq_length: int, dictionary_num: int):
    # Create test data, plus an action mask that marks the last two timesteps as padding
    logit_new = torch.randn(batch_size, seq_length, dictionary_num).requires_grad_(True)
    logit_old = (logit_new + torch.randn_like(logit_new) * 0.1).detach()
    action = torch.randint(0, dictionary_num, (batch_size, seq_length))
    advantages = torch.randn(batch_size, seq_length)
    action_mask = torch.ones(batch_size, seq_length)
    action_mask[:, -2:] = 0  # treat the last two timesteps as padding

    # Compute loss, passing the mask as the per-sample weight
    data = ppo_policy_data(logit_new, logit_old, action, advantages, weight=action_mask)
    loss, info = ppo_policy_error(data, clip_ratio=0.2, entropy_bonus=False)

    # Verify output
    assert isinstance(loss.policy_loss, torch.Tensor)
    assert loss.policy_loss.shape == torch.Size([])  # scalar
    assert not torch.isnan(loss.policy_loss)
    assert not torch.isinf(loss.policy_loss)
    assert logit_new.grad is None
    loss.policy_loss.backward()
    assert isinstance(logit_new.grad, torch.Tensor)
    assert all(np.isscalar(i) for i in info)
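# For reference only: a minimal sketch of the value-clipping objective implied by
# ``use_value_clip=True``, assuming the standard PPO form
#   L_v = E[max((v - R)^2, (clip(v, v_old - eps, v_old + eps) - R)^2)].
# Scaling conventions (e.g. a 0.5 factor) vary between implementations; the helper
# name is hypothetical and the test below relies on ppo_value_error itself.
def _reference_clipped_value_loss(value_new, value_old, ret, clip_ratio=0.2):
    # Clip the new value estimate to stay within clip_ratio of the old estimate
    value_clipped = value_old + torch.clamp(value_new - value_old, -clip_ratio, clip_ratio)
    loss_unclipped = (value_new - ret).pow(2)
    loss_clipped = (value_clipped - ret).pow(2)
    return 0.5 * torch.max(loss_unclipped, loss_clipped).mean()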
@pytest.mark.unittest
def test_value_loss(batch_size: int, seq_length: int):
    # Create test data: new values (with gradient), slightly perturbed old values
    # (detached) and random returns
    values = torch.randn(batch_size, seq_length).requires_grad_(True)
    old_values = (values + torch.randn_like(values) * 0.1).detach()
    returns = torch.randn(batch_size, seq_length)

    # Compute loss
    data = ppo_value_data(values, old_values, returns, weight=None)
    value_loss = ppo_value_error(data, clip_ratio=0.2, use_value_clip=True)

    # Verify output
    assert isinstance(value_loss, torch.Tensor)
    assert value_loss.shape == torch.Size([])  # scalar
    assert not torch.isnan(value_loss)
    assert not torch.isinf(value_loss)
    assert values.grad is None
    value_loss.backward()
    assert isinstance(values.grad, torch.Tensor)
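# A possible companion test (illustrative sketch, not part of the original suite):
# ppo_value_data also accepts a ``weight`` argument, so the masked case can be checked
# the same way as in test_policy_loss_with_mask, assuming ppo_value_error applies the
# weight when reducing the per-timestep value loss.
@pytest.mark.unittest
def test_value_loss_with_mask(batch_size: int, seq_length: int):
    values = torch.randn(batch_size, seq_length).requires_grad_(True)
    old_values = (values + torch.randn_like(values) * 0.1).detach()
    returns = torch.randn(batch_size, seq_length)
    value_mask = torch.ones(batch_size, seq_length)
    value_mask[:, -2:] = 0  # mask out the last two (padding) timesteps

    data = ppo_value_data(values, old_values, returns, weight=value_mask)
    value_loss = ppo_value_error(data, clip_ratio=0.2, use_value_clip=True)

    assert isinstance(value_loss, torch.Tensor)
    assert value_loss.shape == torch.Size([])  # scalar
    assert not torch.isnan(value_loss)
    value_loss.backward()
    assert isinstance(values.grad, torch.Tensor)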