Skip to contents

Flattens a contiguous range of dimensions of the input tensor into a single dimension. For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start, ..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start * ... * S_i * ... * S_end, *), i.e. the flattened dimensions are replaced by the product of their sizes.

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10 0.8363 -0.3256 -0.4786  0.4918 -0.1015  0.1258  0.1979  0.9321 -0.6600  1.3199
#> -1.8637 -0.3717  0.6395  1.2469 -0.7469 -0.5629  1.0271  0.7804 -0.7988 -1.3247
#> -0.1052 -0.3213 -0.3297  0.6581  0.1536  0.0539 -0.7068 -0.0125 -0.5364 -0.4371
#> -2.5628 -0.0252  0.7139 -0.7850 -0.2618  0.5437  2.5489 -0.1465 -0.8824  0.1699
#> -0.8299 -0.2559 -0.7929 -1.2493 -0.9130 -0.4865 -0.8339  0.2704 -0.5661  0.1688
#> -1.1734  0.7865 -0.3662  0.4057  0.2161  1.0954 -0.3886  0.3570  0.2990  0.0424
#>  2.4535  0.1636 -0.7828  0.4661 -1.2721 -1.5155  2.6867 -0.8884  0.0915 -1.3064
#>  0.3381 -0.1856  0.5741 -0.0522  0.4898  0.9619 -0.8632 -0.3784 -0.0413 -0.8654
#> -0.0890  0.8701  1.1518 -0.0622  0.8780 -0.1893  0.0032  0.7527  2.1900 -1.5672
#> -0.6924 -0.1470 -1.0178  0.5447 -1.4386 -2.1014  0.1374  0.1113  0.4166 -1.6090
#> -0.6099  0.4878  0.2439  1.0805 -2.4736  1.2622 -0.4508  0.0710 -0.2118  0.4840
#>  1.3652  0.3586  1.3446 -1.2617  0.2227 -0.2251 -1.3073  0.3871 -0.9402 -2.2198
#> -1.1752 -2.4829  1.0623 -0.1295 -1.7841 -0.3106 -0.4599 -0.5127  0.3953  0.2507
#>  0.2775  0.5822 -0.5385 -0.2347 -1.2937  0.4608  0.8570 -0.5311 -2.1176 -0.3868
#>  0.3904 -0.3603  1.8086  0.5579 -1.0509  0.5568  1.4320 -0.5581 -0.3594 -0.0707
#> -1.0962 -1.4403 -0.6322  0.9969  0.4771 -0.2581 -0.4036  0.7380 -0.2318 -0.2320
#> -1.2448 -0.2072  1.4055  0.9122  0.1834 -0.6248  0.7494 -1.2834  0.3880  0.1614
#>  0.2231  0.3096 -0.2867  0.0919 -0.0397 -0.3460  1.0747 -0.6362  0.5474 -1.5688
#> -0.1734  2.3959  0.3902 -0.6522 -1.1334 -1.2269  1.6429  0.2281  1.0753  0.0146
#>  0.0776 -0.4170 -1.6294 -0.5945 -0.5626 -1.2356  1.4144 -0.9075 -0.1166 -0.3353
#>  1.0399 -1.6310 -1.8600  1.4366  1.3080  0.2777 -0.0126  1.3186  0.0548  0.1706
#>  1.0001  1.0330 -0.5809  0.8497 -0.1221  0.0575 -0.7429 -2.4301  1.3044 -1.2506
#>  1.1858 -0.6114  1.0749  1.4718  2.1165  0.1892 -0.8370 -1.8317 -0.7127  1.3438
#> -0.4971  1.0097 -1.6796 -0.2751 -0.5355 -1.3880 -0.5643 -0.7484  0.9216  0.0598
#> -1.0390 -0.7038  1.3012  0.2949  2.7809  0.5763 -1.5078  0.8914  0.0625  0.5295
#>  1.1428  1.1021  1.3025 -0.8892 -2.2874 -0.7009 -1.5269  0.4270 -1.7136  0.2684
#> -0.1402 -1.4849  0.4509  0.3660 -0.9828 -0.1642 -0.9167 -1.7075 -0.8357  0.5786
#> -0.7500 -0.3658  0.0622  1.4039  1.4773 -0.6211 -1.2986  1.4092 -1.5415 -1.1659
#> -0.8204  0.1765 -0.1658  1.2714 -0.9564 -1.1254 -0.0642 -0.7576 -0.4577 -1.3671
#> -1.9819  1.3220 -0.0896  0.6675  0.6286  0.7109  1.2182  0.3065 -0.3123  1.7906
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]